Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.vpython15
-rw-r--r--deps/v8/BUILD.bazel11
-rw-r--r--deps/v8/BUILD.gn190
-rw-r--r--deps/v8/DEPS57
-rw-r--r--deps/v8/ENG_REVIEW_OWNERS1
-rw-r--r--deps/v8/RISCV_OWNERS1
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h5
-rw-r--r--deps/v8/include/cppgc/internal/persistent-node.h56
-rw-r--r--deps/v8/include/cppgc/internal/pointer-policies.h33
-rw-r--r--deps/v8/include/cppgc/internal/write-barrier.h10
-rw-r--r--deps/v8/include/cppgc/persistent.h2
-rw-r--r--deps/v8/include/v8-callbacks.h3
-rw-r--r--deps/v8/include/v8-fast-api-calls.h68
-rw-r--r--deps/v8/include/v8-forward.h8
-rw-r--r--deps/v8/include/v8-initialization.h34
-rw-r--r--deps/v8/include/v8-internal.h29
-rw-r--r--deps/v8/include/v8-isolate.h3
-rw-r--r--deps/v8/include/v8-locker.h9
-rw-r--r--deps/v8/include/v8-message.h9
-rw-r--r--deps/v8/include/v8-script.h2
-rw-r--r--deps/v8/include/v8-template.h1
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/infra/mb/mb_config.pyl11
-rw-r--r--deps/v8/infra/testing/builders.pyl216
-rw-r--r--deps/v8/samples/shell.cc4
-rw-r--r--deps/v8/src/api/api-inl.h17
-rw-r--r--deps/v8/src/api/api.cc96
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc28
-rw-r--r--deps/v8/src/ast/ast.cc7
-rw-r--r--deps/v8/src/ast/prettyprinter.cc9
-rw-r--r--deps/v8/src/ast/prettyprinter.h2
-rw-r--r--deps/v8/src/ast/scopes.cc13
-rw-r--r--deps/v8/src/base/bounded-page-allocator.cc50
-rw-r--r--deps/v8/src/base/bounded-page-allocator.h17
-rw-r--r--deps/v8/src/base/macros.h12
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc7
-rw-r--r--deps/v8/src/base/vlq.h2
-rw-r--r--deps/v8/src/baseline/baseline-batch-compiler.cc1
-rw-r--r--deps/v8/src/baseline/bytecode-offset-iterator.cc2
-rw-r--r--deps/v8/src/baseline/bytecode-offset-iterator.h2
-rw-r--r--deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h2
-rw-r--r--deps/v8/src/bigint/bigint.h58
-rw-r--r--deps/v8/src/bigint/bitwise.cc262
-rw-r--r--deps/v8/src/bigint/mul-fft.cc2
-rw-r--r--deps/v8/src/bigint/vector-arithmetic.cc17
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc99
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc107
-rw-r--r--deps/v8/src/builtins/array-filter.tq2
-rw-r--r--deps/v8/src/builtins/array-from.tq2
-rw-r--r--deps/v8/src/builtins/array-join.tq17
-rw-r--r--deps/v8/src/builtins/array-lastindexof.tq6
-rw-r--r--deps/v8/src/builtins/array-map.tq7
-rw-r--r--deps/v8/src/builtins/array-reverse.tq15
-rw-r--r--deps/v8/src/builtins/array-slice.tq49
-rw-r--r--deps/v8/src/builtins/array.tq13
-rw-r--r--deps/v8/src/builtins/arraybuffer.tq4
-rw-r--r--deps/v8/src/builtins/base.tq75
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc59
-rw-r--r--deps/v8/src/builtins/builtins-array.cc12
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc53
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc8
-rw-r--r--deps/v8/src/builtins/builtins-bigint.tq6
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc74
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc62
-rw-r--r--deps/v8/src/builtins/builtins-date.cc88
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h2
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc20
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc21
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc119
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-microtask-queue-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-number.cc6
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc43
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc14
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc118
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc41
-rw-r--r--deps/v8/src/builtins/builtins-string.cc18
-rw-r--r--deps/v8/src/builtins/builtins-string.tq6
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc46
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-typed-array.cc84
-rw-r--r--deps/v8/src/builtins/builtins.cc8
-rw-r--r--deps/v8/src/builtins/cast.tq6
-rw-r--r--deps/v8/src/builtins/convert.tq6
-rw-r--r--deps/v8/src/builtins/data-view.tq24
-rw-r--r--deps/v8/src/builtins/finalization-registry.tq14
-rw-r--r--deps/v8/src/builtins/frame-arguments.tq2
-rw-r--r--deps/v8/src/builtins/frames.tq2
-rw-r--r--deps/v8/src/builtins/function.tq3
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.cc8
-rw-r--r--deps/v8/src/builtins/growable-fixed-array.tq12
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc97
-rw-r--r--deps/v8/src/builtins/ic-callable.tq22
-rw-r--r--deps/v8/src/builtins/ic-dynamic-check-maps.tq6
-rw-r--r--deps/v8/src/builtins/ic.tq3
-rw-r--r--deps/v8/src/builtins/internal-coverage.tq4
-rw-r--r--deps/v8/src/builtins/internal.tq10
-rw-r--r--deps/v8/src/builtins/iterator.tq8
-rw-r--r--deps/v8/src/builtins/math.tq2
-rw-r--r--deps/v8/src/builtins/number.tq12
-rw-r--r--deps/v8/src/builtins/object-fromentries.tq2
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc100
-rw-r--r--deps/v8/src/builtins/promise-abstract-operations.tq4
-rw-r--r--deps/v8/src/builtins/promise-all-element-closure.tq6
-rw-r--r--deps/v8/src/builtins/promise-all.tq8
-rw-r--r--deps/v8/src/builtins/promise-any.tq16
-rw-r--r--deps/v8/src/builtins/promise-finally.tq6
-rw-r--r--deps/v8/src/builtins/promise-misc.tq25
-rw-r--r--deps/v8/src/builtins/promise-race.tq2
-rw-r--r--deps/v8/src/builtins/promise-resolve.tq4
-rw-r--r--deps/v8/src/builtins/proxy-delete-property.tq8
-rw-r--r--deps/v8/src/builtins/proxy-get-property.tq6
-rw-r--r--deps/v8/src/builtins/proxy-get-prototype-of.tq4
-rw-r--r--deps/v8/src/builtins/proxy-has-property.tq8
-rw-r--r--deps/v8/src/builtins/proxy-is-extensible.tq2
-rw-r--r--deps/v8/src/builtins/proxy-prevent-extensions.tq4
-rw-r--r--deps/v8/src/builtins/proxy-revoke.tq2
-rw-r--r--deps/v8/src/builtins/proxy-set-property.tq8
-rw-r--r--deps/v8/src/builtins/proxy-set-prototype-of.tq6
-rw-r--r--deps/v8/src/builtins/proxy.tq6
-rw-r--r--deps/v8/src/builtins/regexp-match-all.tq8
-rw-r--r--deps/v8/src/builtins/regexp-match.tq10
-rw-r--r--deps/v8/src/builtins/regexp-replace.tq9
-rw-r--r--deps/v8/src/builtins/regexp-search.tq2
-rw-r--r--deps/v8/src/builtins/regexp.tq6
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc14
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc100
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc2
-rw-r--r--deps/v8/src/builtins/string-pad.tq10
-rw-r--r--deps/v8/src/builtins/string-repeat.tq6
-rw-r--r--deps/v8/src/builtins/string-substr.tq2
-rw-r--r--deps/v8/src/builtins/torque-csa-header-includes.h1
-rw-r--r--deps/v8/src/builtins/torque-internal.tq26
-rw-r--r--deps/v8/src/builtins/typed-array-at.tq9
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq18
-rw-r--r--deps/v8/src/builtins/typed-array-every.tq27
-rw-r--r--deps/v8/src/builtins/typed-array-set.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-slice.tq40
-rw-r--r--deps/v8/src/builtins/typed-array-some.tq30
-rw-r--r--deps/v8/src/builtins/typed-array-sort.tq6
-rw-r--r--deps/v8/src/builtins/typed-array.tq52
-rw-r--r--deps/v8/src/builtins/wasm.tq9
-rw-r--r--deps/v8/src/builtins/weak-ref.tq3
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc87
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm-inl.h2
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc44
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h7
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h4
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h13
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc109
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h13
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc518
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h133
-rw-r--r--deps/v8/src/codegen/external-reference.cc37
-rw-r--r--deps/v8/src/codegen/external-reference.h15
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32-inl.h2
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc165
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h167
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc134
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h68
-rw-r--r--deps/v8/src/codegen/ia32/sse-instr.h19
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64-inl.h2
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.cc15
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.h17
-rw-r--r--deps/v8/src/codegen/macro-assembler.h12
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips-inl.h2
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc12
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h2
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64-inl.h2
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc16
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h17
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc-inl.h4
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h2
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc91
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h189
-rw-r--r--deps/v8/src/codegen/reloc-info.h5
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h4
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc101
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.h89
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.h80
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc111
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h26
-rw-r--r--deps/v8/src/codegen/riscv64/register-riscv64.h6
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390-inl.h4
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc365
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h17
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc186
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h154
-rw-r--r--deps/v8/src/codegen/source-position.h2
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h4
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc80
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h151
-rw-r--r--deps/v8/src/codegen/x64/cpu-x64.cc2
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc255
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h99
-rw-r--r--deps/v8/src/common/globals.h2
-rw-r--r--deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc2
-rw-r--r--deps/v8/src/compiler/access-info.cc9
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc11
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-codes-arm.h707
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc4
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc65
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h669
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc2
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc50
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc332
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h705
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc88
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc160
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h43
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.cc8
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.h8
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc24
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc38
-rw-r--r--deps/v8/src/compiler/backend/instruction.h7
-rw-r--r--deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc6
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h723
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc40
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc6
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-codes-mips.h731
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc2
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc4
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc18
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h779
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc2
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc103
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc219
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h805
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc4
-rw-r--r--deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc204
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h780
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc2
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc9
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc19
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-codes-s390.h777
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc4
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc98
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h771
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc52
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc126
-rw-r--r--deps/v8/src/compiler/branch-elimination.h5
-rw-r--r--deps/v8/src/compiler/c-linkage.cc2
-rw-r--r--deps/v8/src/compiler/code-assembler.cc10
-rw-r--r--deps/v8/src/compiler/code-assembler.h2
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc2
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc14
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc15
-rw-r--r--deps/v8/src/compiler/escape-analysis.h5
-rw-r--r--deps/v8/src/compiler/globals.h3
-rw-r--r--deps/v8/src/compiler/heap-refs.cc13
-rw-r--r--deps/v8/src/compiler/heap-refs.h2
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc13
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc2
-rw-r--r--deps/v8/src/compiler/js-inlining.cc15
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc4
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc19
-rw-r--r--deps/v8/src/compiler/linkage.cc12
-rw-r--r--deps/v8/src/compiler/linkage.h25
-rw-r--r--deps/v8/src/compiler/loop-unrolling.cc10
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc2
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc15
-rw-r--r--deps/v8/src/compiler/machine-operator.cc14
-rw-r--r--deps/v8/src/compiler/machine-operator.h6
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc2
-rw-r--r--deps/v8/src/compiler/node-matchers.h9
-rw-r--r--deps/v8/src/compiler/opcodes.h2
-rw-r--r--deps/v8/src/compiler/pipeline.cc28
-rw-r--r--deps/v8/src/compiler/pipeline.h3
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc18
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h2
-rw-r--r--deps/v8/src/compiler/scheduler.cc23
-rw-r--r--deps/v8/src/compiler/types.cc1
-rw-r--r--deps/v8/src/compiler/types.h4
-rw-r--r--deps/v8/src/compiler/verifier.cc2
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc123
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h11
-rw-r--r--deps/v8/src/compiler/wasm-inlining.cc184
-rw-r--r--deps/v8/src/compiler/wasm-inlining.h53
-rw-r--r--deps/v8/src/d8/d8-posix.cc10
-rw-r--r--deps/v8/src/d8/d8-test.cc10
-rw-r--r--deps/v8/src/d8/d8.cc38
-rw-r--r--deps/v8/src/date/dateparser.h3
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc97
-rw-r--r--deps/v8/src/debug/debug-evaluate.h1
-rw-r--r--deps/v8/src/debug/debug-interface.cc12
-rw-r--r--deps/v8/src/debug/debug-interface.h3
-rw-r--r--deps/v8/src/debug/debug-property-iterator.cc75
-rw-r--r--deps/v8/src/debug/debug-property-iterator.h21
-rw-r--r--deps/v8/src/debug/debug.cc25
-rw-r--r--deps/v8/src/diagnostics/arm/disasm-arm.cc4
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc101
-rw-r--r--deps/v8/src/diagnostics/loong64/disasm-loong64.cc612
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc53
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc202
-rw-r--r--deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc128
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc30
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc54
-rw-r--r--deps/v8/src/execution/arguments-inl.h9
-rw-r--r--deps/v8/src/execution/arguments.h16
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc13
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc11
-rw-r--r--deps/v8/src/execution/execution.cc10
-rw-r--r--deps/v8/src/execution/frames.cc37
-rw-r--r--deps/v8/src/execution/frames.h2
-rw-r--r--deps/v8/src/execution/futex-emulation.cc15
-rw-r--r--deps/v8/src/execution/isolate.cc139
-rw-r--r--deps/v8/src/execution/isolate.h34
-rw-r--r--deps/v8/src/execution/local-isolate-inl.h5
-rw-r--r--deps/v8/src/execution/local-isolate.h1
-rw-r--r--deps/v8/src/execution/messages.cc9
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc19
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.cc569
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.h32
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc13
-rw-r--r--deps/v8/src/execution/v8threads.cc10
-rw-r--r--deps/v8/src/flags/flag-definitions.h26
-rw-r--r--deps/v8/src/flags/flags.cc68
-rw-r--r--deps/v8/src/flags/flags.h11
-rw-r--r--deps/v8/src/handles/global-handles-inl.h33
-rw-r--r--deps/v8/src/handles/global-handles.h14
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.cc235
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.h118
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc46
-rw-r--r--deps/v8/src/heap/concurrent-marking.h2
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc4
-rw-r--r--deps/v8/src/heap/cppgc/allocation.cc14
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap.cc6
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h4
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h8
-rw-r--r--deps/v8/src/heap/cppgc/heap-statistics-collector.cc4
-rw-r--r--deps/v8/src/heap/cppgc/heap-statistics-collector.h4
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc46
-rw-r--r--deps/v8/src/heap/cppgc/marker.h3
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h23
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.cc8
-rw-r--r--deps/v8/src/heap/cppgc/persistent-node.cc37
-rw-r--r--deps/v8/src/heap/cppgc/pointer-policies.cc38
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc2
-rw-r--r--deps/v8/src/heap/factory-base.cc9
-rw-r--r--deps/v8/src/heap/factory.cc26
-rw-r--r--deps/v8/src/heap/gc-tracer.cc13
-rw-r--r--deps/v8/src/heap/gc-tracer.h44
-rw-r--r--deps/v8/src/heap/heap-inl.h10
-rw-r--r--deps/v8/src/heap/heap.cc193
-rw-r--r--deps/v8/src/heap/heap.h26
-rw-r--r--deps/v8/src/heap/large-spaces.cc38
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h23
-rw-r--r--deps/v8/src/heap/mark-compact.cc326
-rw-r--r--deps/v8/src/heap/mark-compact.h24
-rw-r--r--deps/v8/src/heap/marking-barrier-inl.h15
-rw-r--r--deps/v8/src/heap/marking-barrier.cc32
-rw-r--r--deps/v8/src/heap/marking-barrier.h3
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h22
-rw-r--r--deps/v8/src/heap/marking-visitor.h16
-rw-r--r--deps/v8/src/heap/memory-chunk.cc10
-rw-r--r--deps/v8/src/heap/memory-chunk.h12
-rw-r--r--deps/v8/src/heap/memory-measurement.cc1
-rw-r--r--deps/v8/src/heap/object-stats.cc2
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h20
-rw-r--r--deps/v8/src/heap/objects-visiting.h10
-rw-r--r--deps/v8/src/heap/paged-spaces.cc24
-rw-r--r--deps/v8/src/heap/paged-spaces.h2
-rw-r--r--deps/v8/src/heap/safepoint.cc11
-rw-r--r--deps/v8/src/heap/scavenger-inl.h8
-rw-r--r--deps/v8/src/heap/scavenger.cc6
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc6
-rw-r--r--deps/v8/src/heap/sweeper.cc5
-rw-r--r--deps/v8/src/heap/weak-object-worklists.cc13
-rw-r--r--deps/v8/src/heap/weak-object-worklists.h4
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc438
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h65
-rw-r--r--deps/v8/src/ic/handler-configuration.cc8
-rw-r--r--deps/v8/src/ic/handler-configuration.h4
-rw-r--r--deps/v8/src/ic/ic.cc28
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc13
-rw-r--r--deps/v8/src/ic/unary-op-assembler.cc6
-rw-r--r--deps/v8/src/init/bootstrapper.cc49
-rw-r--r--deps/v8/src/init/bootstrapper.h3
-rw-r--r--deps/v8/src/init/isolate-allocator.cc18
-rw-r--r--deps/v8/src/init/v8.cc4
-rw-r--r--deps/v8/src/init/vm-cage.cc36
-rw-r--r--deps/v8/src/init/vm-cage.h81
-rw-r--r--deps/v8/src/inspector/injected-script.cc4
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc17
-rw-r--r--deps/v8/src/inspector/v8-debugger.h2
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc7
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h3
-rw-r--r--deps/v8/src/inspector/value-mirror.cc31
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc57
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc12
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc26
-rw-r--r--deps/v8/src/json/json-parser.cc15
-rw-r--r--deps/v8/src/json/json-stringifier.cc9
-rw-r--r--deps/v8/src/logging/counters-definitions.h4
-rw-r--r--deps/v8/src/logging/log-utils.cc7
-rw-r--r--deps/v8/src/logging/log.cc9
-rw-r--r--deps/v8/src/logging/runtime-call-stats-scope.h6
-rw-r--r--deps/v8/src/logging/runtime-call-stats.cc22
-rw-r--r--deps/v8/src/numbers/conversions.cc37
-rw-r--r--deps/v8/src/objects/api-callbacks.tq3
-rw-r--r--deps/v8/src/objects/arguments.h1
-rw-r--r--deps/v8/src/objects/arguments.tq5
-rw-r--r--deps/v8/src/objects/backing-store.cc145
-rw-r--r--deps/v8/src/objects/backing-store.h41
-rw-r--r--deps/v8/src/objects/bigint.cc526
-rw-r--r--deps/v8/src/objects/bigint.tq3
-rw-r--r--deps/v8/src/objects/cell.tq5
-rw-r--r--deps/v8/src/objects/contexts.h1
-rw-r--r--deps/v8/src/objects/contexts.tq11
-rw-r--r--deps/v8/src/objects/data-handler.h1
-rw-r--r--deps/v8/src/objects/data-handler.tq8
-rw-r--r--deps/v8/src/objects/debug-objects.tq3
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h4
-rw-r--r--deps/v8/src/objects/descriptor-array.tq1
-rw-r--r--deps/v8/src/objects/elements-kind.h17
-rw-r--r--deps/v8/src/objects/elements.cc14
-rw-r--r--deps/v8/src/objects/embedder-data-array-inl.h2
-rw-r--r--deps/v8/src/objects/feedback-vector.cc7
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h2
-rw-r--r--deps/v8/src/objects/fixed-array.h13
-rw-r--r--deps/v8/src/objects/fixed-array.tq5
-rw-r--r--deps/v8/src/objects/heap-object.h1
-rw-r--r--deps/v8/src/objects/instance-type.h10
-rw-r--r--deps/v8/src/objects/intl-objects.cc247
-rw-r--r--deps/v8/src/objects/intl-objects.h93
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h6
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc27
-rw-r--r--deps/v8/src/objects/js-array-buffer.h64
-rw-r--r--deps/v8/src/objects/js-array.h1
-rw-r--r--deps/v8/src/objects/js-array.tq22
-rw-r--r--deps/v8/src/objects/js-break-iterator.cc4
-rw-r--r--deps/v8/src/objects/js-break-iterator.h1
-rw-r--r--deps/v8/src/objects/js-collator.cc34
-rw-r--r--deps/v8/src/objects/js-date-time-format-inl.h2
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc44
-rw-r--r--deps/v8/src/objects/js-date-time-format.h5
-rw-r--r--deps/v8/src/objects/js-date-time-format.tq9
-rw-r--r--deps/v8/src/objects/js-display-names.cc77
-rw-r--r--deps/v8/src/objects/js-function.cc1
-rw-r--r--deps/v8/src/objects/js-function.h1
-rw-r--r--deps/v8/src/objects/js-function.tq12
-rw-r--r--deps/v8/src/objects/js-list-format.cc13
-rw-r--r--deps/v8/src/objects/js-locale.cc39
-rw-r--r--deps/v8/src/objects/js-number-format.cc34
-rw-r--r--deps/v8/src/objects/js-objects-inl.h2
-rw-r--r--deps/v8/src/objects/js-objects.cc39
-rw-r--r--deps/v8/src/objects/js-objects.h1
-rw-r--r--deps/v8/src/objects/js-objects.tq2
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc7
-rw-r--r--deps/v8/src/objects/js-promise.tq4
-rw-r--r--deps/v8/src/objects/js-proxy.h1
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h64
-rw-r--r--deps/v8/src/objects/js-regexp.cc36
-rw-r--r--deps/v8/src/objects/js-regexp.h226
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc9
-rw-r--r--deps/v8/src/objects/js-segment-iterator.cc2
-rw-r--r--deps/v8/src/objects/js-segmenter.cc11
-rw-r--r--deps/v8/src/objects/js-segments.cc2
-rw-r--r--deps/v8/src/objects/js-weak-refs.h1
-rw-r--r--deps/v8/src/objects/keys.cc4
-rw-r--r--deps/v8/src/objects/literal-objects.h1
-rw-r--r--deps/v8/src/objects/lookup.cc17
-rw-r--r--deps/v8/src/objects/managed-inl.h64
-rw-r--r--deps/v8/src/objects/managed.cc2
-rw-r--r--deps/v8/src/objects/managed.h33
-rw-r--r--deps/v8/src/objects/map-inl.h8
-rw-r--r--deps/v8/src/objects/map-updater.cc59
-rw-r--r--deps/v8/src/objects/map-updater.h2
-rw-r--r--deps/v8/src/objects/map.cc40
-rw-r--r--deps/v8/src/objects/map.h2
-rw-r--r--deps/v8/src/objects/megadom-handler.tq1
-rw-r--r--deps/v8/src/objects/microtask.h4
-rw-r--r--deps/v8/src/objects/module.cc39
-rw-r--r--deps/v8/src/objects/module.h5
-rw-r--r--deps/v8/src/objects/name.tq10
-rw-r--r--deps/v8/src/objects/object-list-macros.h1
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h3
-rw-r--r--deps/v8/src/objects/objects-inl.h23
-rw-r--r--deps/v8/src/objects/objects.cc2
-rw-r--r--deps/v8/src/objects/objects.h14
-rw-r--r--deps/v8/src/objects/option-utils.cc172
-rw-r--r--deps/v8/src/objects/option-utils.h95
-rw-r--r--deps/v8/src/objects/ordered-hash-table.tq5
-rw-r--r--deps/v8/src/objects/promise.h15
-rw-r--r--deps/v8/src/objects/property-array.h1
-rw-r--r--deps/v8/src/objects/property-cell.h1
-rw-r--r--deps/v8/src/objects/property-descriptor-object.tq1
-rw-r--r--deps/v8/src/objects/property-descriptor.cc4
-rw-r--r--deps/v8/src/objects/property-descriptor.h4
-rw-r--r--deps/v8/src/objects/property-details.h10
-rw-r--r--deps/v8/src/objects/property.cc10
-rw-r--r--deps/v8/src/objects/regexp-match-info.h1
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h4
-rw-r--r--deps/v8/src/objects/shared-function-info.h9
-rw-r--r--deps/v8/src/objects/source-text-module.h1
-rw-r--r--deps/v8/src/objects/source-text-module.tq1
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc2
-rw-r--r--deps/v8/src/objects/stack-frame-info.h1
-rw-r--r--deps/v8/src/objects/string-inl.h141
-rw-r--r--deps/v8/src/objects/string-table.cc9
-rw-r--r--deps/v8/src/objects/string.cc264
-rw-r--r--deps/v8/src/objects/string.h51
-rw-r--r--deps/v8/src/objects/string.tq12
-rw-r--r--deps/v8/src/objects/struct.h4
-rw-r--r--deps/v8/src/objects/struct.tq2
-rw-r--r--deps/v8/src/objects/swiss-hash-table-helpers.tq6
-rw-r--r--deps/v8/src/objects/swiss-name-dictionary.tq29
-rw-r--r--deps/v8/src/objects/synthetic-module.h1
-rw-r--r--deps/v8/src/objects/tagged-field.h2
-rw-r--r--deps/v8/src/objects/template-objects.tq2
-rw-r--r--deps/v8/src/objects/templates.tq1
-rw-r--r--deps/v8/src/objects/transitions-inl.h3
-rw-r--r--deps/v8/src/objects/transitions.cc3
-rw-r--r--deps/v8/src/objects/value-serializer.cc8
-rw-r--r--deps/v8/src/objects/visitors-inl.h43
-rw-r--r--deps/v8/src/objects/visitors.h38
-rw-r--r--deps/v8/src/parsing/parser-base.h16
-rw-r--r--deps/v8/src/parsing/parser.h7
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc3
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc7
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc25
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h3
-rw-r--r--deps/v8/src/profiler/weak-code-registry.cc3
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc214
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h41
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc261
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h60
-rw-r--r--deps/v8/src/regexp/experimental/experimental.cc53
-rw-r--r--deps/v8/src/regexp/experimental/experimental.h1
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc213
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h35
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc137
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h61
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc118
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h41
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc134
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h62
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc210
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h46
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator-inl.h24
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.cc20
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.h7
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc8
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.h6
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc49
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h10
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc579
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc23
-rw-r--r--deps/v8/src/regexp/regexp-stack.h67
-rw-r--r--deps/v8/src/regexp/regexp.cc45
-rw-r--r--deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc151
-rw-r--r--deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h43
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc207
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h48
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc256
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h50
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc24
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc13
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc5
-rw-r--r--deps/v8/src/runtime/runtime-object.cc2
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc55
-rw-r--r--deps/v8/src/runtime/runtime-test.cc14
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc14
-rw-r--r--deps/v8/src/runtime/runtime.h2
-rw-r--r--deps/v8/src/snapshot/deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/serializer.cc7
-rw-r--r--deps/v8/src/snapshot/serializer.h2
-rw-r--r--deps/v8/src/strings/string-builder.cc2
-rw-r--r--deps/v8/src/strings/string-stream.cc2
-rw-r--r--deps/v8/src/torque/ast.h20
-rw-r--r--deps/v8/src/torque/cc-generator.cc1
-rw-r--r--deps/v8/src/torque/constants.h30
-rw-r--r--deps/v8/src/torque/cpp-builder.cc9
-rw-r--r--deps/v8/src/torque/cpp-builder.h15
-rw-r--r--deps/v8/src/torque/csa-generator.cc1
-rw-r--r--deps/v8/src/torque/declarable.cc15
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc58
-rw-r--r--deps/v8/src/torque/declarations.cc9
-rw-r--r--deps/v8/src/torque/declarations.h5
-rw-r--r--deps/v8/src/torque/earley-parser.cc8
-rw-r--r--deps/v8/src/torque/global-context.cc4
-rw-r--r--deps/v8/src/torque/global-context.h3
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc365
-rw-r--r--deps/v8/src/torque/implementation-visitor.h18
-rw-r--r--deps/v8/src/torque/kythe-data.cc187
-rw-r--r--deps/v8/src/torque/kythe-data.h110
-rw-r--r--deps/v8/src/torque/ls/message-handler.cc29
-rw-r--r--deps/v8/src/torque/source-positions.h17
-rw-r--r--deps/v8/src/torque/torque-compiler.cc37
-rw-r--r--deps/v8/src/torque/torque-compiler.h14
-rw-r--r--deps/v8/src/torque/torque-parser.cc149
-rw-r--r--deps/v8/src/torque/type-inference.cc4
-rw-r--r--deps/v8/src/torque/type-visitor.cc25
-rw-r--r--deps/v8/src/torque/types.cc21
-rw-r--r--deps/v8/src/torque/types.h14
-rw-r--r--deps/v8/src/torque/utils.h4
-rw-r--r--deps/v8/src/trap-handler/handler-inside-posix.cc2
-rw-r--r--deps/v8/src/trap-handler/handler-inside-win.cc54
-rw-r--r--deps/v8/src/trap-handler/handler-outside-simulator.cc10
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h5
-rw-r--r--deps/v8/src/utils/allocation.cc31
-rw-r--r--deps/v8/src/utils/allocation.h23
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h7
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h125
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h46
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc331
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h8
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h596
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h147
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h80
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h103
-rw-r--r--deps/v8/src/wasm/c-api.cc4
-rw-r--r--deps/v8/src/wasm/compilation-environment.h16
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h384
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc11
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc314
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.h7
-rw-r--r--deps/v8/src/wasm/init-expr-interface.cc42
-rw-r--r--deps/v8/src/wasm/memory-protection-key.cc25
-rw-r--r--deps/v8/src/wasm/memory-protection-key.h4
-rw-r--r--deps/v8/src/wasm/module-compiler.cc104
-rw-r--r--deps/v8/src/wasm/module-decoder.cc163
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc127
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc18
-rw-r--r--deps/v8/src/wasm/value-type.h4
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc151
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h25
-rw-r--r--deps/v8/src/wasm/wasm-constants.h28
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc6
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc16
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc1
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h1
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h15
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.cc4
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.h42
-rw-r--r--deps/v8/src/wasm/wasm-js.cc257
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc80
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h23
-rw-r--r--deps/v8/src/wasm/wasm-module.cc29
-rw-r--r--deps/v8/src/wasm/wasm-module.h32
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h21
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc18
-rw-r--r--deps/v8/src/wasm/wasm-objects.h23
-rw-r--r--deps/v8/src/wasm/wasm-opcodes-inl.h9
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h13
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc4
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.cc45
-rw-r--r--deps/v8/src/wasm/wasm-subtyping.h14
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc27
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc3
-rw-r--r--deps/v8/src/zone/zone.cc60
-rw-r--r--deps/v8/src/zone/zone.h28
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.status44
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc96
-rw-r--r--deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc24
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc178
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-sloppy-equality.cc4
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h10
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc12
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc5
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc5
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc421
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner-streams.cc4
-rw-r--r--deps/v8/test/cctest/test-accessors.cc2
-rw-r--r--deps/v8/test/cctest/test-api-array-buffer.cc13
-rw-r--r--deps/v8/test/cctest/test-api.cc67
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc81
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc15
-rw-r--r--deps/v8/test/cctest/test-concurrent-feedback-vector.cc4
-rw-r--r--deps/v8/test/cctest/test-concurrent-script-context-table.cc4
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc2
-rw-r--r--deps/v8/test/cctest/test-debug.cc21
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc47
-rw-r--r--deps/v8/test/cctest/test-disasm-loong64.cc1002
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc1077
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc38
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc2
-rw-r--r--deps/v8/test/cctest/test-intl.cc28
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc8
-rw-r--r--deps/v8/test/cctest/test-managed.cc3
-rw-r--r--deps/v8/test/cctest/test-regexp.cc9
-rw-r--r--deps/v8/test/cctest/test-serialize.cc36
-rw-r--r--deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc4
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc2
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc2
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-gc.cc404
-rw-r--r--deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc32
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc34
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc34
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc14
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc328
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc97
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc126
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc10
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc15
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h17
-rw-r--r--deps/v8/test/cctest/wasm/wasm-simd-utils.cc16
-rw-r--r--deps/v8/test/common/wasm/wasm-interpreter.cc64
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h40
-rw-r--r--deps/v8/test/fuzzer/regexp.cc2
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc316
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc4
-rw-r--r--deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt12
-rw-r--r--deps/v8/test/inspector/debugger/async-function-step-out-expected.txt111
-rw-r--r--deps/v8/test/inspector/debugger/async-stack-await-expected.txt15
-rw-r--r--deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/get-properties-paused-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/get-properties-paused.js6
-rw-r--r--deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt7
-rw-r--r--deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js24
-rw-r--r--deps/v8/test/inspector/inspector.status5
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1253277-expected.txt5
-rw-r--r--deps/v8/test/inspector/regress/regress-crbug-1253277.js23
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n-expected.txt74
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n.js182
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js6
-rw-r--r--deps/v8/test/message/message.status7
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1228407.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1234764.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1234770.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-1247763.js30
-rw-r--r--deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js6
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-5929-1.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/private-brand-checks.js5
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js270
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js541
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js915
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-sequence.js88
-rw-r--r--deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js3
-rw-r--r--deps/v8/test/mjsunit/json.js5
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status36
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-1248677.js24
-rw-r--r--deps/v8/test/mjsunit/regress/asm/regress-1252747.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1016450.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1073440.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-12256.js54
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1238033.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1254191.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353004.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9441.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1248704.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1249941.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-1254704.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-10602.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-12194.js74
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1237024.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1239116.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1239116b.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1248024.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1251465.js26
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1254674.js11
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1254675.js30
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-1255354.js26
-rw-r--r--deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js92
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test-large.log2
-rw-r--r--deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js697
-rw-r--r--deps/v8/test/mjsunit/typedarray-helpers.js47
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js186
-rw-r--r--deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js1272
-rw-r--r--deps/v8/test/mjsunit/wasm/array-copy-benchmark.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-table.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-nominal.js59
-rw-r--r--deps/v8/test/mjsunit/wasm/inlining.js288
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/load-elimination.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js26
-rw-r--r--deps/v8/test/mjsunit/wasm/mutable-globals.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/prototype.js42
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-globals.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection.js125
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js73
-rw-r--r--deps/v8/test/test262/test262.status142
-rw-r--r--deps/v8/test/test262/testcfg.py1
-rw-r--r--deps/v8/test/torque/test-torque.tq184
-rw-r--r--deps/v8/test/unittests/BUILD.gn23
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc85
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/branch-elimination-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/control-equivalence-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc18
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc33
-rw-r--r--deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc17
-rw-r--r--deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc50
-rw-r--r--deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc45
-rw-r--r--deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc10
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc64
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc7
-rw-r--r--deps/v8/test/unittests/heap/local-heap-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/safepoint-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc28
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/unmapper-unittest.cc10
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc6
-rw-r--r--deps/v8/test/unittests/regexp/regexp-unittest.cc6
-rw-r--r--deps/v8/test/unittests/torque/ls-message-unittest.cc13
-rw-r--r--deps/v8/test/unittests/torque/ls-server-data-unittest.cc74
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc117
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc222
-rw-r--r--deps/v8/test/unittests/wasm/memory-protection-unittest.cc169
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc182
-rw-r--r--deps/v8/test/unittests/wasm/subtyping-unittest.cc8
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc42
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc2
-rw-r--r--deps/v8/test/wasm-api-tests/callbacks.cc6
-rw-r--r--deps/v8/test/wasm-api-tests/finalize.cc2
-rw-r--r--deps/v8/test/wasm-js/testcfg.py4
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-js/wasm-js.status23
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py5
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status1
-rw-r--r--deps/v8/testing/gtest-support.h13
-rw-r--r--deps/v8/third_party/jinja2/tests.py2
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq145
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.cc6
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.h11
-rw-r--r--deps/v8/third_party/zlib/google/zip_unittest.cc5
-rw-r--r--deps/v8/third_party/zlib/google/zip_writer.h5
-rwxr-xr-xdeps/v8/tools/cppgc/gen_cmake.py4
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py4
-rwxr-xr-xdeps/v8/tools/release/list_deprecated.py234
-rw-r--r--deps/v8/tools/run_perf.py5
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs3
-rw-r--r--deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs2
-rw-r--r--deps/v8/tools/testrunner/base_runner.py14
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py12
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py2
-rw-r--r--deps/v8/tools/testrunner/local/utils.py2
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py11
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py33
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/expectation.py6
-rw-r--r--deps/v8/tools/testrunner/testproc/filter.py2
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rw-r--r--deps/v8/tools/v8heapconst.py161
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh20
-rw-r--r--deps/v8/tools/whitespace.txt4
877 files changed, 27914 insertions, 19443 deletions
diff --git a/deps/v8/.vpython b/deps/v8/.vpython
index 3b7cb32468..d4a07677ca 100644
--- a/deps/v8/.vpython
+++ b/deps/v8/.vpython
@@ -24,6 +24,21 @@
python_version: "2.7"
+# The default set of platforms vpython checks does not yet include mac-arm64.
+# Setting `verify_pep425_tag` to the list of platforms we explicitly must support
+# allows us to ensure that vpython specs stay mac-arm64-friendly
+verify_pep425_tag: [
+ {python: "cp27", abi: "cp27mu", platform: "manylinux1_x86_64"},
+ {python: "cp27", abi: "cp27mu", platform: "linux_arm64"},
+ {python: "cp27", abi: "cp27mu", platform: "linux_armv6l"},
+
+ {python: "cp27", abi: "cp27m", platform: "macosx_10_10_intel"},
+ {python: "cp27", abi: "cp27m", platform: "macosx_11_0_arm64"},
+
+ {python: "cp27", abi: "cp27m", platform: "win32"},
+ {python: "cp27", abi: "cp27m", platform: "win_amd64"}
+]
+
# Needed by third_party/catapult/devil/devil, which is imported by
# build/android/test_runner.py when running performance tests.
wheel: <
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index e70b2f4b2d..23bce0f4bd 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -165,7 +165,6 @@ config_setting(
# v8_control_flow_integrity
# v8_enable_virtual_memory_cage
# cppgc_enable_caged_heap
-# cppgc_enable_check_assignments_in_prefinalizers
# cppgc_enable_object_names
# cppgc_enable_verify_heap
# cppgc_enable_young_generation
@@ -894,6 +893,8 @@ filegroup(
"src/torque/instance-type-generator.cc",
"src/torque/instructions.cc",
"src/torque/instructions.h",
+ "src/torque/kythe-data.cc",
+ "src/torque/kythe-data.h",
"src/torque/parameter-difference.h",
"src/torque/server-data.cc",
"src/torque/server-data.h",
@@ -1218,6 +1219,7 @@ filegroup(
"src/flags/flag-definitions.h",
"src/flags/flags.cc",
"src/flags/flags.h",
+ "src/handles/global-handles-inl.h",
"src/handles/global-handles.cc",
"src/handles/global-handles.h",
"src/handles/handles-inl.h",
@@ -1588,6 +1590,7 @@ filegroup(
"src/objects/lookup-inl.h",
"src/objects/lookup.cc",
"src/objects/lookup.h",
+ "src/objects/managed-inl.h",
"src/objects/managed.cc",
"src/objects/managed.h",
"src/objects/map-inl.h",
@@ -1619,6 +1622,8 @@ filegroup(
"src/objects/objects-definitions.h",
"src/objects/oddball-inl.h",
"src/objects/oddball.h",
+ "src/objects/option-utils.h",
+ "src/objects/option-utils.cc",
"src/objects/ordered-hash-table-inl.h",
"src/objects/ordered-hash-table.cc",
"src/objects/ordered-hash-table.h",
@@ -1708,6 +1713,7 @@ filegroup(
"src/objects/value-serializer.cc",
"src/objects/value-serializer.h",
"src/objects/visitors.cc",
+ "src/objects/visitors-inl.h",
"src/objects/visitors.h",
"src/parsing/expression-scope.h",
"src/parsing/func-name-inferrer.cc",
@@ -2727,6 +2733,7 @@ filegroup(
"src/bigint/bigint-internal.cc",
"src/bigint/bigint-internal.h",
"src/bigint/bigint.h",
+ "src/bigint/bitwise.cc",
"src/bigint/digit-arithmetic.h",
"src/bigint/div-barrett.cc",
"src/bigint/div-burnikel.cc",
@@ -2909,11 +2916,11 @@ v8_torque(
"exported-macros-assembler.h",
"factory.cc",
"factory.inc",
- "field-offsets.h",
"instance-types.h",
"interface-descriptors.inc",
"objects-body-descriptors-inl.inc",
"objects-printer.cc",
+ "visitor-lists.h",
],
args = select({
":is_v8_annotate_torque_ir": [ "-annotate-ir" ],
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 5c7d931b27..f491f2a4e6 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -293,10 +293,6 @@ declare_args() {
# Enables additional heap verification phases and checks.
cppgc_enable_verify_heap = ""
- # Enable assignment checks for Members/Persistents during prefinalizer invocations.
- # TODO(v8:11749): Enable by default after fixing any existing issues in Blink.
- cppgc_enable_check_assignments_in_prefinalizers = false
-
# Enable allocations during prefinalizer invocations.
cppgc_allow_allocations_in_prefinalizers = false
@@ -351,7 +347,9 @@ declare_args() {
# parameter count of function with JS linkage.
# TODO(v8:11112): Remove once all architectures support the flag and it is
# enabled unconditionally.
- v8_include_receiver_in_argc = false
+ v8_include_receiver_in_argc =
+ v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
+ v8_current_cpu == "arm" || v8_current_cpu == "arm64"
}
# Derived defaults.
@@ -474,6 +472,13 @@ if (build_with_chromium && v8_current_cpu == "arm64" &&
v8_control_flow_integrity = true
}
+# Enable the virtual memory cage on 64-bit Chromium builds.
+if (build_with_chromium &&
+ (v8_current_cpu == "arm64" || v8_current_cpu == "x64")) {
+ # The cage is incompatible with lsan.
+ v8_enable_virtual_memory_cage = !is_lsan
+}
+
assert(!v8_disable_write_barriers || v8_enable_single_generation,
"Disabling write barriers works only with single generation")
@@ -789,10 +794,6 @@ config("features") {
defines += [ "CPPGC_VERIFY_HEAP" ]
}
- if (cppgc_enable_check_assignments_in_prefinalizers) {
- defines += [ "CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS" ]
- }
-
if (cppgc_allow_allocations_in_prefinalizers) {
defines += [ "CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS" ]
}
@@ -1219,7 +1220,12 @@ config("toolchain") {
}
if (is_clang) {
- cflags += [ "-Wmissing-field-initializers" ]
+ cflags += [
+ "-Wmissing-field-initializers",
+
+ # TODO(v8:12245): Fix shadowing instances and remove.
+ "-Wno-shadow",
+ ]
if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") {
# We exclude MIPS because the IsMipsArchVariant macro causes trouble.
@@ -1255,7 +1261,144 @@ config("toolchain") {
}
if (!is_clang && is_win) {
- cflags += [ "/wd4506" ] # Benign "no definition for inline function"
+ cflags += [
+ "/wd4506", # Benign "no definition for inline function"
+
+ # Warnings permanently disabled:
+
+ # C4091: 'typedef ': ignored on left of 'X' when no variable is
+ # declared.
+ # This happens in a number of Windows headers. Dumb.
+ "/wd4091",
+
+ # C4127: conditional expression is constant
+ # This warning can in theory catch dead code and other problems, but
+ # triggers in far too many desirable cases where the conditional
+ # expression is either set by macros or corresponds some legitimate
+ # compile-time constant expression (due to constant template args,
+ # conditionals comparing the sizes of different types, etc.). Some of
+ # these can be worked around, but it's not worth it.
+ "/wd4127",
+
+ # C4251: 'identifier' : class 'type' needs to have dll-interface to be
+ # used by clients of class 'type2'
+ # This is necessary for the shared library build.
+ "/wd4251",
+
+ # C4275: non dll-interface class used as base for dll-interface class
+ # This points out a potential (but rare) problem with referencing static
+ # fields of a non-exported base, through the base's non-exported inline
+ # functions, or directly. The warning is subtle enough that people just
+ # suppressed it when they saw it, so it's not worth it.
+ "/wd4275",
+
+ # C4312 is a VS 2015 64-bit warning for integer to larger pointer.
+ # TODO(brucedawson): fix warnings, crbug.com/554200
+ "/wd4312",
+
+ # C4324 warns when padding is added to fulfill alignas requirements,
+ # but can trigger in benign cases that are difficult to individually
+ # suppress.
+ "/wd4324",
+
+ # C4351: new behavior: elements of array 'array' will be default
+ # initialized
+ # This is a silly "warning" that basically just alerts you that the
+ # compiler is going to actually follow the language spec like it's
+ # supposed to, instead of not following it like old buggy versions did.
+ # There's absolutely no reason to turn this on.
+ "/wd4351",
+
+ # C4355: 'this': used in base member initializer list
+ # It's commonly useful to pass |this| to objects in a class' initializer
+ # list. While this warning can catch real bugs, most of the time the
+ # constructors in question don't attempt to call methods on the passed-in
+ # pointer (until later), and annotating every legit usage of this is
+ # simply more hassle than the warning is worth.
+ "/wd4355",
+
+ # C4503: 'identifier': decorated name length exceeded, name was
+ # truncated
+ # This only means that some long error messages might have truncated
+ # identifiers in the presence of lots of templates. It has no effect on
+ # program correctness and there's no real reason to waste time trying to
+ # prevent it.
+ "/wd4503",
+
+ # Warning C4589 says: "Constructor of abstract class ignores
+ # initializer for virtual base class." Disable this warning because it
+ # is flaky in VS 2015 RTM. It triggers on compiler generated
+ # copy-constructors in some cases.
+ "/wd4589",
+
+ # C4611: interaction between 'function' and C++ object destruction is
+ # non-portable
+ # This warning is unavoidable when using e.g. setjmp/longjmp. MSDN
+ # suggests using exceptions instead of setjmp/longjmp for C++, but
+ # Chromium code compiles without exception support. We therefore have to
+ # use setjmp/longjmp for e.g. JPEG decode error handling, which means we
+ # have to turn off this warning (and be careful about how object
+ # destruction happens in such cases).
+ "/wd4611",
+
+ # Warnings to evaluate and possibly fix/reenable later:
+
+ "/wd4100", # Unreferenced formal function parameter.
+ "/wd4121", # Alignment of a member was sensitive to packing.
+ "/wd4244", # Conversion: possible loss of data.
+ "/wd4505", # Unreferenced local function has been removed.
+ "/wd4510", # Default constructor could not be generated.
+ "/wd4512", # Assignment operator could not be generated.
+ "/wd4610", # Class can never be instantiated, constructor required.
+ "/wd4838", # Narrowing conversion. Doesn't seem to be very useful.
+ "/wd4995", # 'X': name was marked as #pragma deprecated
+ "/wd4996", # Deprecated function warning.
+
+ # These are variable shadowing warnings that are new in VS2015. We
+ # should work through these at some point -- they may be removed from
+ # the RTM release in the /W4 set.
+ "/wd4456",
+ "/wd4457",
+ "/wd4458",
+ "/wd4459",
+
+ # All of our compilers support the extensions below.
+ "/wd4200", # nonstandard extension used: zero-sized array in struct/union
+ "/wd4201", # nonstandard extension used: nameless struct/union
+ "/wd4204", # nonstandard extension used : non-constant aggregate
+ # initializer
+
+ "/wd4221", # nonstandard extension used : 'identifier' : cannot be
+ # initialized using address of automatic variable
+
+ # http://crbug.com/588506 - Conversion suppressions waiting on Clang
+ # -Wconversion.
+ "/wd4245", # 'conversion' : conversion from 'type1' to 'type2',
+ # signed/unsigned mismatch
+
+ "/wd4267", # 'var' : conversion from 'size_t' to 'type', possible loss of
+ # data
+
+ "/wd4305", # 'identifier' : truncation from 'type1' to 'type2'
+ "/wd4389", # 'operator' : signed/unsigned mismatch
+
+ "/wd4702", # unreachable code
+
+ # http://crbug.com/848979 - MSVC is more conservative than Clang with
+ # regards to variables initialized and consumed in different branches.
+ "/wd4701", # Potentially uninitialized local variable 'name' used
+ "/wd4703", # Potentially uninitialized local pointer variable 'name' used
+
+ # http://crbug.com/848979 - Remaining Clang permitted warnings.
+ "/wd4661", # 'identifier' : no suitable definition provided for explicit
+ # template instantiation request
+
+ "/wd4706", # assignment within conditional expression
+ # MSVC is stricter and requires a boolean expression.
+
+ "/wd4715", # 'function' : not all control paths return a value'
+ # MSVC does not analyze switch (enum) for completeness.
+ ]
}
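A minimal sketch (not part of the diff) of the benign pattern C4127 flags at /W4: the condition is a compile-time constant on purpose, typically via a template parameter or a sizeof comparison.

    #include <cstddef>
    // The condition is intentionally constant, so MSVC reports
    // C4127 ("conditional expression is constant") at /W4.
    template <typename T>
    size_t PackedSize(size_t count) {
      if (sizeof(T) == sizeof(char)) {  // constant condition by design
        return count;
      }
      return count * sizeof(T);
    }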
if (!is_clang && !is_win) {
@@ -1702,11 +1845,11 @@ template("run_torque") {
"$destination_folder/exported-macros-assembler.h",
"$destination_folder/factory.cc",
"$destination_folder/factory.inc",
- "$destination_folder/field-offsets.h",
"$destination_folder/instance-types.h",
"$destination_folder/interface-descriptors.inc",
"$destination_folder/objects-body-descriptors-inl.inc",
"$destination_folder/objects-printer.cc",
+ "$destination_folder/visitor-lists.h",
]
foreach(file, torque_files) {
@@ -2751,6 +2894,7 @@ v8_header_set("v8_internal_headers") {
"src/extensions/ignition-statistics-extension.h",
"src/extensions/statistics-extension.h",
"src/extensions/trigger-failure-extension.h",
+ "src/handles/global-handles-inl.h",
"src/handles/global-handles.h",
"src/handles/handles-inl.h",
"src/handles/handles.h",
@@ -3004,6 +3148,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/lookup-cache.h",
"src/objects/lookup-inl.h",
"src/objects/lookup.h",
+ "src/objects/managed-inl.h",
"src/objects/managed.h",
"src/objects/map-inl.h",
"src/objects/map-updater.h",
@@ -3029,6 +3174,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/objects.h",
"src/objects/oddball-inl.h",
"src/objects/oddball.h",
+ "src/objects/option-utils.h",
"src/objects/ordered-hash-table-inl.h",
"src/objects/ordered-hash-table.h",
"src/objects/osr-optimized-code-cache-inl.h",
@@ -3097,6 +3243,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/transitions.h",
"src/objects/type-hints.h",
"src/objects/value-serializer.h",
+ "src/objects/visitors-inl.h",
"src/objects/visitors.h",
"src/parsing/expression-scope.h",
"src/parsing/func-name-inferrer.h",
@@ -3467,7 +3614,8 @@ v8_header_set("v8_internal_headers") {
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [ "src/trap-handler/handler-inside-posix.h" ]
}
- if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) {
+ if (current_cpu == "x64" &&
+ (is_linux || is_chromeos || is_mac || is_win)) {
sources += [ "src/trap-handler/trap-handler-simulator.h" ]
}
}
@@ -4072,6 +4220,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/module.cc",
"src/objects/object-type.cc",
"src/objects/objects.cc",
+ "src/objects/option-utils.cc",
"src/objects/ordered-hash-table.cc",
"src/objects/osr-optimized-code-cache.cc",
"src/objects/property-descriptor.cc",
@@ -4388,16 +4537,22 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
]
if (v8_enable_webassembly) {
- # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux
- # and Mac.
+ # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux,
+ # Mac, and Windows.
if ((current_cpu == "arm64" && is_mac) ||
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [
"src/trap-handler/handler-inside-posix.cc",
"src/trap-handler/handler-outside-posix.cc",
]
+ } else if (current_cpu == "x64" && is_win) {
+ sources += [
+ "src/trap-handler/handler-inside-win.cc",
+ "src/trap-handler/handler-outside-win.cc",
+ ]
}
- if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) {
+ if (current_cpu == "x64" &&
+ (is_linux || is_chromeos || is_mac || is_win)) {
sources += [ "src/trap-handler/handler-outside-simulator.cc" ]
}
}
@@ -4675,6 +4830,8 @@ v8_source_set("torque_base") {
"src/torque/instance-type-generator.cc",
"src/torque/instructions.cc",
"src/torque/instructions.h",
+ "src/torque/kythe-data.cc",
+ "src/torque/kythe-data.h",
"src/torque/parameter-difference.h",
"src/torque/server-data.cc",
"src/torque/server-data.h",
@@ -5106,6 +5263,7 @@ v8_source_set("v8_bigint") {
"src/bigint/bigint-internal.cc",
"src/bigint/bigint-internal.h",
"src/bigint/bigint.h",
+ "src/bigint/bitwise.cc",
"src/bigint/digit-arithmetic.h",
"src/bigint/div-burnikel.cc",
"src/bigint/div-helpers.cc",
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 8059e3b8c3..587b7e5375 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -49,10 +49,10 @@ vars = {
'reclient_version': 're_client_version:0.40.0.40ff5a5',
# GN CIPD package version.
- 'gn_version': 'git_revision:69ec4fca1fa69ddadae13f9e6b7507efa0675263',
+ 'gn_version': 'git_revision:0153d369bbccc908f4da4993b1ba82728055926a',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:7b62727dc713b47d7a7ce9bca27500cb8e82ebd7',
+ 'luci_go': 'git_revision:a373a19da0fbbbe81b2b684e3797260294393e40',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -73,7 +73,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platform-tools_version
# and whatever else without interference from each other.
- 'android_sdk_platform-tools_version': 'qi_k82nm6j9nz4dQosOoqXew4_TFAy8rcGOHDLptx1sC',
+ 'android_sdk_platform-tools_version': 'g7n_-r6yJd_SGRklujGB1wEt8iyr77FZTUJVS9w6O34C',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
@@ -85,16 +85,16 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
- 'android_sdk_cmdline-tools_version': 'ZT3JmI6GMG4YVcZ1OtECRVMOLLJAWAdPbi-OclubJLMC',
+ 'android_sdk_cmdline-tools_version': 'AuYa11pULKT8AI14_owabJrkZoRGuovL-nvwmiONlYEC',
}
deps = {
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '715537d6007ca71837f48bcb04fc3d482aed2507',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68d816952258c9d817bba656ee2664b35507f01b',
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '17d097b0ffdc297f04afb54e9e3abff3f1203f06',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'ebad8533842661f66b9b905e0ee9890a32f628d5',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '7ea3a871db68ae2cbbeaf5433a3192a799ef3c11',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'a9bc3e283182a586998338a665c7eae17406ec54',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
'buildtools/linux64': {
@@ -120,9 +120,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '17de75220a90f23a16f9f87fbc5c00dce475b726',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '9959b06ccd7291269796e85c7c8f7b432af414bd',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '44ea7aba6a34a9250e7793418d83f209a480caf4',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a002c725cf03e16d3bc47dd9b7962aa22f7ee1d9',
'buildtools/win': {
'packages': [
{
@@ -148,14 +148,14 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '50f3fca7a0eac6b6e8e5e9aee7af3c2a05831261',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '50dd431dffe5cf86e9064a652d6b01dbbe542cf0',
'test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
'third_party/aemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'QewYN5289B8deg5Mn6clWEv58UqpocHGKeob2F0T87kC'
+ 'version': 'FAd7QuRV-mCjbKgg2SO4BBlRCvGIsI672THjo3tEIZAC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -176,7 +176,7 @@ deps = {
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '6e5dc9acd241c308385f970c384d9e083b2b6e56',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '7a11b799efba1cd679b4f5d14889465e9e1fb1f4',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
@@ -218,7 +218,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '2331f088546de8f58dcc02daf8212254aaeb2d4c',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'c0b9d253fbf9a729be51d3890fa78be4b5eb3352',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -226,20 +226,20 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '728566654bb1d2c78cdbe6b642c0d68c6f658ca7',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0e2fb336b2e7ddbbb9c5ab70eab25f82f55dff2b',
'third_party/fuchsia-sdk': {
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '18896843130c33372c455c153ad07d2217bd2085',
'condition': 'checkout_fuchsia',
},
'third_party/google_benchmark/src': {
- 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'c23a0012523bc3e12c9323f398dcc433c4f19f05',
+ 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '0baacde3618ca617da95375e0af13ce1baadea47',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '955c7f837efad184ec63e771c42542d37545eaef',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '3b49be074d5c1340eeb447e6a8e78427051e675a',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'ece15d049f2d360721716089372e3749fb89e0f4',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '3f443830bd52d3aa5fab3c1aa2b6d0848bb5039d',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '47226fa33ef5c9b48668c74128f25ef82f10e7af',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '5df06a49fc485f3371e8ca2f4957dac4840ba3bb',
'third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.
@@ -283,9 +283,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '77c132322fe81a1f5518b326e18c99ebd3281627',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'dfa96e81458fb3b39676e45f7e9e000dff789b05',
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '664e4259b150e07f1a1e440459f59fbc68edb82f',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c06edd1f455183fc89e9f8c2cf745db8f564d8ea',
'tools/clang/dsymutil': {
'packages': [
{
@@ -321,7 +321,18 @@ include_rules = [
'+include',
'+unicode',
'+third_party/fdlibm',
- '+third_party/ittapi/include'
+ '+third_party/ittapi/include',
+ # Abseil features are allow-listed. Please use your best judgement when adding
+ # to this set -- if in doubt, email v8-dev@. For general guidance, refer to
+ # the Chromium guidelines (though note that some requirements in V8 may be
+ # different to Chromium's):
+ # https://chromium.googlesource.com/chromium/src/+/main/styleguide/c++/c++11.md
+ '+absl/types/optional.h',
+ '+absl/types/variant.h',
+ '+absl/status',
+ # Some abseil features are explicitly banned.
+ '-absl/types/any.h', # Requires RTTI.
+ '-absl/types/flags', # Requires RTTI.
]
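Illustrative only (not taken from the diff): how the allow-list above plays out in a V8 source file.

    #include "absl/types/optional.h"  // allow-listed
    #include "absl/types/variant.h"   // allow-listed
    // #include "absl/types/any.h"    // banned above: requires RTTI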
# checkdeps.py shouldn't check for includes in these directories:
@@ -483,7 +494,7 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
- '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1',
+ '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-xenial.tgz.sha1',
],
},
{
@@ -494,7 +505,7 @@ hooks = [
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
- '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1',
+ '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-xenial.tgz.sha1',
],
},
{
diff --git a/deps/v8/ENG_REVIEW_OWNERS b/deps/v8/ENG_REVIEW_OWNERS
index e5040c45ad..3943c49432 100644
--- a/deps/v8/ENG_REVIEW_OWNERS
+++ b/deps/v8/ENG_REVIEW_OWNERS
@@ -5,3 +5,4 @@
adamk@chromium.org
danno@chromium.org
hpayer@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/RISCV_OWNERS b/deps/v8/RISCV_OWNERS
index 8f8e15a40a..e3e11fdf49 100644
--- a/deps/v8/RISCV_OWNERS
+++ b/deps/v8/RISCV_OWNERS
@@ -1,3 +1,4 @@
brice.dobry@futurewei.com
peng.w@rioslab.org
qiuji@iscas.ac.cn
+yahan@iscas.ac.cn
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 62f3c2ec07..9384adeb69 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -144,12 +144,15 @@
// class MyData : public base::trace_event::ConvertableToTraceFormat {
// public:
// MyData() {}
+//
+// MyData(const MyData&) = delete;
+// MyData& operator=(const MyData&) = delete;
+//
// void AppendAsTraceFormat(std::string* out) const override {
// out->append("{\"foo\":1}");
// }
// private:
// ~MyData() override {}
-// DISALLOW_COPY_AND_ASSIGN(MyData);
// };
//
// TRACE_EVENT1("foo", "bar", "data",
diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h
index b5dba476a4..1fea667848 100644
--- a/deps/v8/include/cppgc/internal/persistent-node.h
+++ b/deps/v8/include/cppgc/internal/persistent-node.h
@@ -75,16 +75,16 @@ class PersistentNode final {
TraceCallback trace_ = nullptr;
};
-class V8_EXPORT PersistentRegion {
+class V8_EXPORT PersistentRegionBase {
using PersistentNodeSlots = std::array<PersistentNode, 256u>;
public:
- PersistentRegion() = default;
+ PersistentRegionBase() = default;
// Clears Persistent fields to avoid stale pointers after heap teardown.
- ~PersistentRegion();
+ ~PersistentRegionBase();
- PersistentRegion(const PersistentRegion&) = delete;
- PersistentRegion& operator=(const PersistentRegion&) = delete;
+ PersistentRegionBase(const PersistentRegionBase&) = delete;
+ PersistentRegionBase& operator=(const PersistentRegionBase&) = delete;
PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
if (!free_list_head_) {
@@ -126,8 +126,39 @@ class V8_EXPORT PersistentRegion {
friend class CrossThreadPersistentRegion;
};
-// CrossThreadPersistent uses PersistentRegion but protects it using this lock
-// when needed.
+// Variant of PersistentRegionBase that checks whether the allocation and
+// freeing happen only on the thread that created the region.
+class V8_EXPORT PersistentRegion final : public PersistentRegionBase {
+ public:
+ PersistentRegion();
+ // Clears Persistent fields to avoid stale pointers after heap teardown.
+ ~PersistentRegion() = default;
+
+ PersistentRegion(const PersistentRegion&) = delete;
+ PersistentRegion& operator=(const PersistentRegion&) = delete;
+
+ V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
+#if V8_ENABLE_CHECKS
+ CheckIsCreationThread();
+#endif // V8_ENABLE_CHECKS
+ return PersistentRegionBase::AllocateNode(owner, trace);
+ }
+
+ V8_INLINE void FreeNode(PersistentNode* node) {
+#if V8_ENABLE_CHECKS
+ CheckIsCreationThread();
+#endif // V8_ENABLE_CHECKS
+ PersistentRegionBase::FreeNode(node);
+ }
+
+ private:
+ void CheckIsCreationThread();
+
+ int creation_thread_id_;
+};
+
+// CrossThreadPersistent uses PersistentRegionBase but protects it using this
+// lock when needed.
class V8_EXPORT PersistentRegionLock final {
public:
PersistentRegionLock();
@@ -136,9 +167,10 @@ class V8_EXPORT PersistentRegionLock final {
static void AssertLocked();
};
-// Variant of PersistentRegion that checks whether the PersistentRegionLock is
-// locked.
-class V8_EXPORT CrossThreadPersistentRegion final : protected PersistentRegion {
+// Variant of PersistentRegionBase that checks whether the PersistentRegionLock
+// is locked.
+class V8_EXPORT CrossThreadPersistentRegion final
+ : protected PersistentRegionBase {
public:
CrossThreadPersistentRegion() = default;
// Clears Persistent fields to avoid stale pointers after heap teardown.
@@ -150,12 +182,12 @@ class V8_EXPORT CrossThreadPersistentRegion final : protected PersistentRegion {
V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) {
PersistentRegionLock::AssertLocked();
- return PersistentRegion::AllocateNode(owner, trace);
+ return PersistentRegionBase::AllocateNode(owner, trace);
}
V8_INLINE void FreeNode(PersistentNode* node) {
PersistentRegionLock::AssertLocked();
- PersistentRegion::FreeNode(node);
+ PersistentRegionBase::FreeNode(node);
}
void Trace(Visitor*);
diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h
index cdf0bb693d..7c4f4a0862 100644
--- a/deps/v8/include/cppgc/internal/pointer-policies.h
+++ b/deps/v8/include/cppgc/internal/pointer-policies.h
@@ -51,7 +51,17 @@ struct NoWriteBarrierPolicy {
static void AssigningBarrier(const void*, const void*) {}
};
-class V8_EXPORT EnabledCheckingPolicy {
+class V8_EXPORT SameThreadEnabledCheckingPolicyBase {
+ protected:
+ void CheckPointerImpl(const void* ptr, bool points_to_payload,
+ bool check_off_heap_assignments);
+
+ const HeapBase* heap_ = nullptr;
+};
+
+template <bool kCheckOffHeapAssignments>
+class V8_EXPORT SameThreadEnabledCheckingPolicy
+ : private SameThreadEnabledCheckingPolicyBase {
protected:
template <typename T>
void CheckPointer(const T* ptr) {
@@ -61,23 +71,20 @@ class V8_EXPORT EnabledCheckingPolicy {
}
private:
- void CheckPointerImpl(const void* ptr, bool points_to_payload);
-
template <typename T, bool = IsCompleteV<T>>
struct CheckPointersImplTrampoline {
- static void Call(EnabledCheckingPolicy* policy, const T* ptr) {
- policy->CheckPointerImpl(ptr, false);
+ static void Call(SameThreadEnabledCheckingPolicy* policy, const T* ptr) {
+ policy->CheckPointerImpl(ptr, false, kCheckOffHeapAssignments);
}
};
template <typename T>
struct CheckPointersImplTrampoline<T, true> {
- static void Call(EnabledCheckingPolicy* policy, const T* ptr) {
- policy->CheckPointerImpl(ptr, IsGarbageCollectedTypeV<T>);
+ static void Call(SameThreadEnabledCheckingPolicy* policy, const T* ptr) {
+ policy->CheckPointerImpl(ptr, IsGarbageCollectedTypeV<T>,
+ kCheckOffHeapAssignments);
}
};
-
- const HeapBase* heap_ = nullptr;
};
class DisabledCheckingPolicy {
@@ -86,8 +93,12 @@ class DisabledCheckingPolicy {
};
#if V8_ENABLE_CHECKS
-using DefaultMemberCheckingPolicy = EnabledCheckingPolicy;
-using DefaultPersistentCheckingPolicy = EnabledCheckingPolicy;
+// Off-heap members are not connected to the object graph and thus cannot
+// resurrect dead objects.
+using DefaultMemberCheckingPolicy =
+ SameThreadEnabledCheckingPolicy<false /* kCheckOffHeapAssignments */>;
+using DefaultPersistentCheckingPolicy =
+ SameThreadEnabledCheckingPolicy<true /* kCheckOffHeapAssignments */>;
#else
using DefaultMemberCheckingPolicy = DisabledCheckingPolicy;
using DefaultPersistentCheckingPolicy = DisabledCheckingPolicy;
diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h
index 28184dc9c8..67f039c658 100644
--- a/deps/v8/include/cppgc/internal/write-barrier.h
+++ b/deps/v8/include/cppgc/internal/write-barrier.h
@@ -214,6 +214,11 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
WriteBarrier::Params& params,
HeapHandleCallback) {
+#if !defined(CPPGC_YOUNG_GENERATION)
+ if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+#endif // !CPPGC_YOUNG_GENERATION
bool within_cage = TryGetCagedHeap(slot, value, params);
if (!within_cage) {
return WriteBarrier::Type::kNone;
@@ -317,7 +322,10 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
HeapHandleCallback callback) {
// The following check covers nullptr as well as sentinel pointer.
if (object <= static_cast<void*>(kSentinelPointer)) {
- return WriteBarrier::Type::kNone;
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+ }
+ if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+ return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
if (IsMarking(object, &params.heap)) {
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h
index b83a464576..182fb08549 100644
--- a/deps/v8/include/cppgc/persistent.h
+++ b/deps/v8/include/cppgc/persistent.h
@@ -45,7 +45,7 @@ class PersistentBase {
mutable const void* raw_ = nullptr;
mutable PersistentNode* node_ = nullptr;
- friend class PersistentRegion;
+ friend class PersistentRegionBase;
};
// The basic class from which all Persistent classes are generated.
diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h
index f424a24d8b..870df6a821 100644
--- a/deps/v8/include/v8-callbacks.h
+++ b/deps/v8/include/v8-callbacks.h
@@ -308,6 +308,9 @@ using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if WebAssembly exceptions are enabled ---
using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
+// --- Callback for checking if WebAssembly dynamic tiering is enabled ---
+using WasmDynamicTieringEnabledCallback = bool (*)(Local<Context> context);
+
// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
using SharedArrayBufferConstructorEnabledCallback =
bool (*)(Local<Context> context);
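A minimal sketch of the new callback's shape; the matching setter, Isolate::SetWasmDynamicTieringEnabledCallback, is added in v8-isolate.h further below. The policy body here is an illustrative placeholder.

    bool WasmDynamicTieringEnabled(v8::Local<v8::Context> context) {
      return true;  // embedder-specific policy
    }
    // Registration (illustrative):
    // isolate->SetWasmDynamicTieringEnabledCallback(WasmDynamicTieringEnabled);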
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 90cbe680ba..854f845aba 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -277,6 +277,17 @@ class CTypeInfo {
Flags flags = Flags::kNone)
: type_(type), sequence_type_(sequence_type), flags_(flags) {}
+ typedef uint32_t Identifier;
+ explicit constexpr CTypeInfo(Identifier identifier)
+ : CTypeInfo(static_cast<Type>(identifier >> 16),
+ static_cast<SequenceType>((identifier >> 8) & 255),
+ static_cast<Flags>(identifier & 255)) {}
+ constexpr Identifier GetId() const {
+ return static_cast<uint8_t>(type_) << 16 |
+ static_cast<uint8_t>(sequence_type_) << 8 |
+ static_cast<uint8_t>(flags_);
+ }
+
constexpr Type GetType() const { return type_; }
constexpr SequenceType GetSequenceType() const { return sequence_type_; }
constexpr Flags GetFlags() const { return flags_; }
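A small sketch of the round trip implied above: GetId() packs type, sequence type and flags into one 32-bit Identifier, and the Identifier constructor decodes it again.

    #include "v8-fast-api-calls.h"  // the header being modified above
    constexpr v8::CTypeInfo kInt32Seq(v8::CTypeInfo::Type::kInt32,
                                      v8::CTypeInfo::SequenceType::kIsSequence);
    static_assert(v8::CTypeInfo(kInt32Seq.GetId()).GetType() ==
                      v8::CTypeInfo::Type::kInt32,
                  "decoding the Identifier recovers the original type");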
@@ -324,6 +335,14 @@ struct FastApiTypedArray : public FastApiTypedArrayBase {
return tmp;
}
+ bool getStorageIfAligned(T** elements) const {
+ if (reinterpret_cast<uintptr_t>(data_) % alignof(T) != 0) {
+ return false;
+ }
+ *elements = reinterpret_cast<T*>(data_);
+ return true;
+ }
+
private:
// This pointer should include the typed array offset applied.
// It's not guaranteed that it's aligned to sizeof(T), it's only
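A sketch of how a fast API callback might use the new getStorageIfAligned() helper; the signature and names are illustrative, not taken from the diff.

    void ProcessFast(v8::Local<v8::Object> receiver,
                     const v8::FastApiTypedArray<double>& values) {
      double* data = nullptr;
      if (values.getStorageIfAligned(&data)) {
        // The backing store is aligned for double: read elements in place.
      } else {
        // Fall back to the copying element accessor.
      }
    }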
@@ -466,7 +485,7 @@ class V8_EXPORT CFunction {
};
};
-struct ApiObject {
+struct V8_DEPRECATE_SOON("Use v8::Local<v8::Value> instead.") ApiObject {
uintptr_t address;
};
@@ -816,23 +835,54 @@ static constexpr CTypeInfo kTypeInfoFloat64 =
* returns true on success. `type_info` will be used for conversions.
*/
template <const CTypeInfo* type_info, typename T>
-bool V8_EXPORT V8_WARN_UNUSED_RESULT TryCopyAndConvertArrayToCppBuffer(
- Local<Array> src, T* dst, uint32_t max_length);
+V8_DEPRECATE_SOON(
+ "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
+bool V8_EXPORT V8_WARN_UNUSED_RESULT
+ TryCopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
+ uint32_t max_length);
template <>
+V8_DEPRECATE_SOON(
+ "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
inline bool V8_WARN_UNUSED_RESULT
-TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>(
- Local<Array> src, int32_t* dst, uint32_t max_length) {
- return CopyAndConvertArrayToCppBufferInt32(src, dst, max_length);
+ TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>(
+ Local<Array> src, int32_t* dst, uint32_t max_length) {
+ return false;
}
template <>
+V8_DEPRECATE_SOON(
+ "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
inline bool V8_WARN_UNUSED_RESULT
-TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>(
- Local<Array> src, double* dst, uint32_t max_length) {
- return CopyAndConvertArrayToCppBufferFloat64(src, dst, max_length);
+ TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>(
+ Local<Array> src, double* dst, uint32_t max_length) {
+ return false;
}
+template <CTypeInfo::Identifier type_info_id, typename T>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer(
+ Local<Array> src, T* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<int32_t>::Build().GetId(), int32_t>(
+ Local<Array> src, int32_t* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<uint32_t>::Build().GetId(), uint32_t>(
+ Local<Array> src, uint32_t* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<float>::Build().GetId(), float>(
+ Local<Array> src, float* dst, uint32_t max_length);
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<double>::Build().GetId(), double>(
+ Local<Array> src, double* dst, uint32_t max_length);
+
} // namespace v8
#endif // INCLUDE_V8_FAST_API_CALLS_H_
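Usage sketch for the new identifier-based entry point declared above; the compile-time Identifier comes from internal::CTypeInfoBuilder, as in the explicit specializations.

    #include <cstdint>
    #include "v8-fast-api-calls.h"
    bool ReadDoubles(v8::Local<v8::Array> input, double* out, uint32_t capacity) {
      constexpr auto kId =
          v8::internal::CTypeInfoBuilder<double>::Build().GetId();
      return v8::TryToCopyAndConvertArrayToCppBuffer<kId, double>(input, out,
                                                                  capacity);
    }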
diff --git a/deps/v8/include/v8-forward.h b/deps/v8/include/v8-forward.h
index ae16fe64b2..db3a2017b7 100644
--- a/deps/v8/include/v8-forward.h
+++ b/deps/v8/include/v8-forward.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef INCLUDE_V8_LOCAL_HANDLES_H_
-#define INCLUDE_V8_LOCAL_HANDLES_H_
+#ifndef INCLUDE_V8_FORWARD_H_
+#define INCLUDE_V8_FORWARD_H_
// This header is intended to be used by headers that pass around V8 types,
// either by pointer or using Local<Type>. The full definitions can be included
@@ -27,6 +27,7 @@ class Context;
class DataView;
class Data;
class Date;
+class Extension;
class External;
class FixedArray;
class Float32Array;
@@ -63,6 +64,7 @@ class StringObject;
class Symbol;
class SymbolObject;
class Template;
+class TryCatch;
class TypedArray;
class Uint16Array;
class Uint32;
@@ -76,4 +78,4 @@ class WasmModuleObject;
} // namespace v8
-#endif // INCLUDE_V8_LOCAL_HANDLES_H_
+#endif // INCLUDE_V8_FORWARD_H_
diff --git a/deps/v8/include/v8-initialization.h b/deps/v8/include/v8-initialization.h
index 3b609292f6..7c9f26b892 100644
--- a/deps/v8/include/v8-initialization.h
+++ b/deps/v8/include/v8-initialization.h
@@ -195,22 +195,38 @@ class V8_EXPORT V8 {
* This must be invoked after the platform was initialized but before V8 is
* initialized. The virtual memory cage is torn down during platform shutdown.
* Returns true on success, false otherwise.
+ *
+ * TODO(saelo): Once it is no longer optional to create the virtual memory
+ * cage when compiling with V8_VIRTUAL_MEMORY_CAGE, the cage initialization
+ * will likely happen as part of V8::Initialize, at which point this function
+ * should be removed.
*/
static bool InitializeVirtualMemoryCage();
/**
- * Provides access to the data page allocator for the virtual memory cage.
+ * Provides access to the virtual memory cage page allocator.
+ *
+ * This allocator allocates pages inside the virtual memory cage. It can for
+ * example be used to obtain virtual memory for ArrayBuffer backing stores,
+ * which must be located inside the cage.
+ *
+ * It should be assumed that an attacker can corrupt data inside the cage,
+ * and so in particular the contents of pages returned by this allocator,
+ * arbitrarily and concurrently. Due to this, it is recommended to only
+ * place pure data buffers in pages obtained through this allocator.
*
- * This allocator allocates pages inside the data cage part of the virtual
- * memory cage in which data buffers such as ArrayBuffer backing stores must
- * be allocated. Objects in this region should generally consists purely of
- * data and not contain any pointers. It should be assumed that an attacker
- * can corrupt data inside the cage, and so in particular the contents of
- * pages returned by this allocator, arbitrarily and concurrently.
+ * This function must only be called after initializing the virtual memory
+ * cage and V8.
+ */
+ static PageAllocator* GetVirtualMemoryCagePageAllocator();
+
+ /**
+ * Returns the size of the virtual memory cage in bytes.
*
- * The virtual memory cage must have been initialized before.
+ * If the cage has not been initialized, or if the initialization failed,
+ * this returns zero.
*/
- static PageAllocator* GetVirtualMemoryCageDataPageAllocator();
+ static size_t GetVirtualMemoryCageSizeInBytes();
#endif
/**
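Minimal initialization-order sketch implied by the comments above (V8_VIRTUAL_MEMORY_CAGE builds only); error handling is left to the embedder.

    void InitializeWithCage(v8::Platform* platform) {
      v8::V8::InitializePlatform(platform);
    #ifdef V8_VIRTUAL_MEMORY_CAGE
      if (!v8::V8::InitializeVirtualMemoryCage()) {
        // Cage setup failed; GetVirtualMemoryCageSizeInBytes() reports 0.
      }
    #endif
      v8::V8::Initialize();
    }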
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 4a84fc066a..e1aee508bb 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -495,15 +495,10 @@ constexpr bool VirtualMemoryCageIsEnabled() {
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
-// Size of the pointer compression cage located at the start of the virtual
-// memory cage.
-constexpr size_t kVirtualMemoryCagePointerCageSize =
- Internals::kPtrComprCageReservationSize;
-
// Size of the virtual memory cage, excluding the guard regions surrounding it.
constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
-static_assert(kVirtualMemoryCageSize > kVirtualMemoryCagePointerCageSize,
+static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize,
"The virtual memory cage must be larger than the pointer "
"compression cage contained within it.");
@@ -525,19 +520,21 @@ static_assert((kVirtualMemoryCageGuardRegionSize %
"The size of the virtual memory cage guard region must be a "
"multiple of its required alignment.");
-// Minimum possible size of the virtual memory cage, excluding the guard regions
-// surrounding it. Used by unit tests.
-constexpr size_t kVirtualMemoryCageMinimumSize =
- 2 * kVirtualMemoryCagePointerCageSize;
+// Minimum size of the virtual memory cage, excluding the guard regions
+// surrounding it. If the cage reservation fails, its size is currently halved
+// until either the reservation succeeds or the minimum size is reached. A
+// minimum of 32GB allows the 4GB pointer compression region as well as the
+// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage.
+constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30; // 32 GB
// For now, even if the virtual memory cage is enabled, we still allow backing
// stores to be allocated outside of it as fallback. This will simplify the
// initial rollout. However, if the heap sandbox is also enabled, we already use
// the "enforcing mode" of the virtual memory cage. This is useful for testing.
#ifdef V8_HEAP_SANDBOX
-constexpr bool kAllowBackingStoresOutsideDataCage = false;
+constexpr bool kAllowBackingStoresOutsideCage = false;
#else
-constexpr bool kAllowBackingStoresOutsideDataCage = true;
+constexpr bool kAllowBackingStoresOutsideCage = true;
#endif // V8_HEAP_SANDBOX
#endif // V8_VIRTUAL_MEMORY_CAGE
@@ -572,14 +569,6 @@ class BackingStoreBase {};
} // namespace internal
-V8_EXPORT bool CopyAndConvertArrayToCppBufferInt32(Local<Array> src,
- int32_t* dst,
- uint32_t max_length);
-
-V8_EXPORT bool CopyAndConvertArrayToCppBufferFloat64(Local<Array> src,
- double* dst,
- uint32_t max_length);
-
} // namespace v8
#endif // INCLUDE_V8_INTERNAL_H_
diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h
index dc4af456b5..39276b34a9 100644
--- a/deps/v8/include/v8-isolate.h
+++ b/deps/v8/include/v8-isolate.h
@@ -1482,6 +1482,9 @@ class V8_EXPORT Isolate {
void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
+ void SetWasmDynamicTieringEnabledCallback(
+ WasmDynamicTieringEnabledCallback callback);
+
void SetSharedArrayBufferConstructorEnabledCallback(
SharedArrayBufferConstructorEnabledCallback callback);
diff --git a/deps/v8/include/v8-locker.h b/deps/v8/include/v8-locker.h
index b90fc5ed91..360022b7d9 100644
--- a/deps/v8/include/v8-locker.h
+++ b/deps/v8/include/v8-locker.h
@@ -64,7 +64,7 @@ class Isolate;
* given thread. This can be useful if you have code that can be called either
* from code that holds the lock or from code that does not. The Unlocker is
* not recursive so you can not have several Unlockers on the stack at once, and
- * you can not use an Unlocker in a thread that is not inside a Locker's scope.
+ * you cannot use an Unlocker in a thread that is not inside a Locker's scope.
*
* An unlocker will unlock several lockers if it has to and reinstate the
* correct depth of locking on its destruction, e.g.:
@@ -122,8 +122,13 @@ class V8_EXPORT Locker {
static bool IsLocked(Isolate* isolate);
/**
- * Returns whether v8::Locker is being used by this V8 instance.
+ * Returns whether any v8::Locker has ever been used in this process.
+ * TODO(cbruni, chromium:1240851): Fix locking checks on a per-thread basis.
+ * The current implementation is quite confusing and leads to unexpected
+ * results if anybody uses v8::Locker in the current process.
*/
+ static bool WasEverUsed();
+ V8_DEPRECATE_SOON("Use WasEverUsed instead")
static bool IsActive();
// Disallow copying and assigning.
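A sketch of the Locker/Unlocker pattern the comment above describes; note that Locker::WasEverUsed() now replaces the soon-deprecated IsActive() for process-wide checks.

    void RunWithTemporaryUnlock(v8::Isolate* isolate) {
      v8::Locker locker(isolate);
      // ... work that requires the isolate lock ...
      {
        v8::Unlocker unlocker(isolate);
        // Long-running work that must not hold the lock.
      }
      // The lock is reacquired here, at the original depth.
    }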
diff --git a/deps/v8/include/v8-message.h b/deps/v8/include/v8-message.h
index 566d830e0d..62b6bd92f9 100644
--- a/deps/v8/include/v8-message.h
+++ b/deps/v8/include/v8-message.h
@@ -7,6 +7,8 @@
#include <stdio.h>
+#include <iosfwd>
+
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8-maybe.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
@@ -58,9 +60,7 @@ class ScriptOriginOptions {
*/
class V8_EXPORT ScriptOrigin {
public:
- #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATE_SOON("Use constructor with primitive C++ types")
- #endif
ScriptOrigin(
Local<Value> resource_name, Local<Integer> resource_line_offset,
Local<Integer> resource_column_offset,
@@ -71,9 +71,7 @@ class V8_EXPORT ScriptOrigin {
Local<Boolean> is_wasm = Local<Boolean>(),
Local<Boolean> is_module = Local<Boolean>(),
Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
- #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
V8_DEPRECATE_SOON("Use constructor that takes an isolate")
- #endif
explicit ScriptOrigin(
Local<Value> resource_name, int resource_line_offset = 0,
int resource_column_offset = 0,
@@ -210,8 +208,9 @@ class V8_EXPORT Message {
bool IsSharedCrossOrigin() const;
bool IsOpaque() const;
- // TODO(1245381): Print to a string instead of on a FILE.
+ V8_DEPRECATE_SOON("Use the version that takes a std::ostream&.")
static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
+ static void PrintCurrentStackTrace(Isolate* isolate, std::ostream& out);
static const int kNoLineNumberInfo = 0;
static const int kNoColumnInfo = 0;
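A sketch of moving off the deprecated FILE* overload onto the new std::ostream& overload added above.

    #include <sstream>
    #include <string>
    std::string CurrentStackTrace(v8::Isolate* isolate) {
      std::ostringstream os;
      v8::Message::PrintCurrentStackTrace(isolate, os);
      return os.str();
    }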
diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h
index 370903b20a..d17089932c 100644
--- a/deps/v8/include/v8-script.h
+++ b/deps/v8/include/v8-script.h
@@ -209,7 +209,7 @@ class V8_EXPORT Module : public Data {
*/
int GetIdentityHash() const;
- using ResolveCallback =
+ using ResolveCallback V8_DEPRECATE_SOON("Use ResolveModuleCallback") =
MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
Local<Module> referrer);
using ResolveModuleCallback = MaybeLocal<Module> (*)(
diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h
index b05639cfc1..96fcab6074 100644
--- a/deps/v8/include/v8-template.h
+++ b/deps/v8/include/v8-template.h
@@ -27,6 +27,7 @@ class Signature;
F(ArrayProto_forEach, array_for_each_iterator) \
F(ArrayProto_keys, array_keys_iterator) \
F(ArrayProto_values, array_values_iterator) \
+ F(ArrayPrototype, initial_array_prototype) \
F(AsyncIteratorPrototype, initial_async_iterator_prototype) \
F(ErrorPrototype, initial_error_prototype) \
F(IteratorPrototype, initial_iterator_prototype) \
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index b39e2dc208..6078b78bd4 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 9
-#define V8_MINOR_VERSION 5
-#define V8_BUILD_NUMBER 172
-#define V8_PATCH_LEVEL 25
+#define V8_MINOR_VERSION 6
+#define V8_BUILD_NUMBER 180
+#define V8_PATCH_LEVEL 14
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index f80c637634..e3afd9787b 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -88,11 +88,10 @@
'V8 Win64 - debug': 'debug_x64_minimal_symbols',
'V8 Win64 - msvc': 'release_x64_msvc',
# Mac.
- 'V8 Mac64': 'release_x64',
- 'V8 Mac64 - debug': 'debug_x64',
+ 'V8 Mac64 - builder': 'release_x64',
+ 'V8 Mac64 - debug builder': 'debug_x64',
'V8 Official Mac ARM64': 'release_arm64',
'V8 Official Mac ARM64 Debug': 'debug_arm64',
- 'V8 Mac64 GC Stress': 'debug_x64',
'V8 Mac64 ASAN': 'release_x64_asan_no_lsan',
'V8 Mac - arm64 - release builder': 'release_arm64',
'V8 Mac - arm64 - debug builder': 'debug_arm64',
@@ -108,6 +107,7 @@
'V8 Linux gcc': 'release_x86_gcc',
# FYI.
'V8 iOS - sim': 'release_x64_ios_simulator',
+ 'V8 Linux64 - arm64 - sim - heap sandbox - debug - builder': 'debug_x64_heap_sandbox_arm64_sim',
'V8 Linux64 - cppgc-non-default - debug - builder': 'debug_x64_non_default_cppgc',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
'V8 Linux64 - disable runtime call stats': 'release_x64_disable_runtime_call_stats',
@@ -232,6 +232,7 @@
'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
'v8_linux64_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox',
+ 'v8_linux_arm64_sim_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox_arm64_sim',
'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_nodcheck_rel_ng': 'release_x64',
'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto',
@@ -573,6 +574,8 @@
'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_heap_sandbox': [
'debug_bot', 'x64', 'v8_enable_heap_sandbox'],
+ 'debug_x64_heap_sandbox_arm64_sim': [
+ 'debug_bot', 'simulate_arm64', 'v8_enable_heap_sandbox'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
'debug_x64_non_default_cppgc': [
@@ -805,7 +808,7 @@
},
'reclient': {
- 'gn_args': 'use_rbe=true',
+ 'gn_args': 'use_rbe=true use_remoteexec=true',
},
'release': {
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index abdadb9af9..f17f651212 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -593,6 +593,14 @@
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12},
],
},
+ 'v8_linux_arm64_sim_heap_sandbox_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 14},
+ ],
+ },
'v8_linux_arm64_rel_ng_triggered': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1889,6 +1897,19 @@
},
],
},
+ 'V8 Linux64 - arm64 - sim - heap sandbox - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 7200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 14},
+ ],
+ },
'V8 Linux - loong64 - sim': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
@@ -2029,134 +2050,13 @@
},
{
'name': 'numfuzz',
- 'suffix': 'combined',
- 'test_args': [
- '--total-timeout-sec=2100',
- '--stress-delay-tasks=4',
- '--stress-deopt=2',
- '--stress-compaction=2',
- '--stress-gc=4',
- '--stress-marking=4',
- '--stress-scavenge=4',
- '--stress-thread-pool-size=2',
- ],
- 'shards': 4
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'scavenge',
- 'test_args': ['--total-timeout-sec=2100', '--stress-scavenge=1']
- },
- ],
- },
- 'V8 NumFuzz - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-18.04',
- },
- 'swarming_task_attrs': {
- 'expiration': 13800,
- 'hard_timeout': 4200,
- 'priority': 35,
- },
- 'tests': [
- {'name': 'd8testing_random_gc'},
- {
- 'name': 'numfuzz',
- 'suffix': 'marking',
- 'test_args': ['--total-timeout-sec=2100', '--stress-marking=1'],
- 'shards': 2
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'delay',
- 'test_args': ['--total-timeout-sec=2100', '--stress-delay-tasks=1']
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'threads',
- 'test_args': ['--total-timeout-sec=2100', '--stress-thread-pool-size=1']
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'combined',
- 'test_args': [
- '--total-timeout-sec=2100',
- '--stress-delay-tasks=4',
- '--stress-deopt=2',
- '--stress-compaction=2',
- '--stress-gc=4',
- '--stress-marking=4',
- '--stress-scavenge=4',
- '--stress-thread-pool-size=2',
- ],
- 'shards': 3
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'scavenge',
- 'test_args': ['--total-timeout-sec=2100', '--stress-scavenge=1']
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'deopt',
- 'test_args': ['--total-timeout-sec=2100', '--stress-deopt=1'],
- 'shards': 2
- },
- ],
- },
- 'V8 NumFuzz - staging': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-18.04',
- },
- 'swarming_task_attrs': {
- 'expiration': 13800,
- 'hard_timeout': 4200,
- 'priority': 35,
- },
- 'tests': [
- {
- 'name': 'numfuzz',
- 'suffix': 'deopt',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-deopt=1']
- },
- ],
- },
- 'V8 NumFuzz - TSAN - staging': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-18.04',
- },
- 'swarming_task_attrs': {
- 'expiration': 13800,
- 'hard_timeout': 4200,
- 'priority': 35,
- },
- 'tests': [
- {'name': 'd8testing_random_gc', 'shards': 2},
- {
- 'name': 'numfuzz',
- 'suffix': 'marking',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-marking=1']
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'delay',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-delay-tasks=1']
- },
- {
- 'name': 'numfuzz',
- 'suffix': 'threads',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-thread-pool-size=1']
- },
- {
- 'name': 'numfuzz',
'suffix': 'stack',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-stack-size=1']
+ 'test_args': ['--total-timeout-sec=2100', '--stress-stack-size=1']
},
{
'name': 'numfuzz',
'suffix': 'combined',
'test_args': [
- '--infra-staging',
'--total-timeout-sec=2100',
'--stress-delay-tasks=4',
'--stress-deopt=2',
@@ -2172,11 +2072,11 @@
{
'name': 'numfuzz',
'suffix': 'scavenge',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-scavenge=1']
+ 'test_args': ['--total-timeout-sec=2100', '--stress-scavenge=1']
},
],
},
- 'V8 NumFuzz - debug - staging': {
+ 'V8 NumFuzz - debug': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
},
@@ -2190,29 +2090,28 @@
{
'name': 'numfuzz',
'suffix': 'marking',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-marking=1'],
+ 'test_args': ['--total-timeout-sec=2100', '--stress-marking=1'],
'shards': 2
},
{
'name': 'numfuzz',
'suffix': 'delay',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-delay-tasks=1']
+ 'test_args': ['--total-timeout-sec=2100', '--stress-delay-tasks=1']
},
{
'name': 'numfuzz',
'suffix': 'threads',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-thread-pool-size=1']
+ 'test_args': ['--total-timeout-sec=2100', '--stress-thread-pool-size=1']
},
{
'name': 'numfuzz',
'suffix': 'stack',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-stack-size=1']
+ 'test_args': ['--total-timeout-sec=2100', '--stress-stack-size=1']
},
{
'name': 'numfuzz',
'suffix': 'combined',
'test_args': [
- '--infra-staging',
'--total-timeout-sec=2100',
'--stress-delay-tasks=4',
'--stress-deopt=2',
@@ -2228,12 +2127,12 @@
{
'name': 'numfuzz',
'suffix': 'scavenge',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-scavenge=1']
+ 'test_args': ['--total-timeout-sec=2100', '--stress-scavenge=1']
},
{
'name': 'numfuzz',
'suffix': 'deopt',
- 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-deopt=1'],
+ 'test_args': ['--total-timeout-sec=2100', '--stress-deopt=1'],
'shards': 2
},
],
@@ -2242,33 +2141,18 @@
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
},
- 'swarming_task_attrs': {
- 'expiration': 13800,
- 'hard_timeout': 4200,
- 'priority': 35,
- },
'tests': [
{
'name': 'numfuzz',
'suffix': 'deopt',
'test_args': ['--total-timeout-sec=900', '--stress-deopt=1']
},
- {
- 'name': 'numfuzz',
- 'suffix': 'deopt-staging',
- 'test_args': ['--infra-staging', '--total-timeout-sec=900', '--stress-deopt=1']
- },
],
},
'v8_numfuzz_tsan_ng_triggered': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
},
- 'swarming_task_attrs': {
- 'expiration': 13800,
- 'hard_timeout': 4200,
- 'priority': 35,
- },
'tests': [
{
'name': 'numfuzz',
@@ -2287,23 +2171,13 @@
},
{
'name': 'numfuzz',
- 'suffix': 'combined',
- 'test_args': [
- '--total-timeout-sec=900',
- '--stress-delay-tasks=4',
- '--stress-deopt=2',
- '--stress-compaction=2',
- '--stress-gc=4',
- '--stress-marking=4',
- '--stress-scavenge=4',
- '--stress-thread-pool-size=2',
- ],
+ 'suffix': 'stack',
+ 'test_args': ['--total-timeout-sec=900', '--stress-stack-size=1']
},
{
'name': 'numfuzz',
- 'suffix': 'combined-staging',
+ 'suffix': 'combined',
'test_args': [
- '--infra-staging',
'--total-timeout-sec=900',
'--stress-delay-tasks=4',
'--stress-deopt=2',
@@ -2312,6 +2186,7 @@
'--stress-marking=4',
'--stress-scavenge=4',
'--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
],
},
{
@@ -2325,13 +2200,7 @@
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
},
- 'swarming_task_attrs': {
- 'expiration': 13800,
- 'hard_timeout': 4200,
- 'priority': 35,
- },
'tests': [
- {'name': 'd8testing_random_gc'},
{
'name': 'numfuzz',
'suffix': 'marking',
@@ -2349,23 +2218,13 @@
},
{
'name': 'numfuzz',
- 'suffix': 'combined',
- 'test_args': [
- '--total-timeout-sec=900',
- '--stress-delay-tasks=4',
- '--stress-deopt=2',
- '--stress-compaction=2',
- '--stress-gc=4',
- '--stress-marking=4',
- '--stress-scavenge=4',
- '--stress-thread-pool-size=2',
- ],
+ 'suffix': 'stack',
+ 'test_args': ['--total-timeout-sec=900', '--stress-stack-size=1']
},
{
'name': 'numfuzz',
- 'suffix': 'combined-staging',
+ 'suffix': 'combined',
'test_args': [
- '--infra-staging',
'--total-timeout-sec=900',
'--stress-delay-tasks=4',
'--stress-deopt=2',
@@ -2374,6 +2233,7 @@
'--stress-marking=4',
'--stress-scavenge=4',
'--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
],
},
{
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 933f138542..ab8abeb71e 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -381,8 +381,8 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
stack_trace_string->IsString() &&
stack_trace_string.As<v8::String>()->Length() > 0) {
v8::String::Utf8Value stack_trace(isolate, stack_trace_string);
- const char* stack_trace_string = ToCString(stack_trace);
- fprintf(stderr, "%s\n", stack_trace_string);
+ const char* err = ToCString(stack_trace);
+ fprintf(stderr, "%s\n", err);
}
}
}
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index c5c774800b..c033c3d2e8 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -264,12 +264,12 @@ void CopyDoubleElementsToTypedBuffer(T* dst, uint32_t length,
}
}
-template <const CTypeInfo* type_info, typename T>
+template <CTypeInfo::Identifier type_info_id, typename T>
bool CopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
uint32_t max_length) {
static_assert(
- std::is_same<
- T, typename i::CTypeInfoTraits<type_info->GetType()>::ctype>::value,
+ std::is_same<T, typename i::CTypeInfoTraits<
+ CTypeInfo(type_info_id).GetType()>::ctype>::value,
"Type mismatch between the expected CTypeInfo::Type and the destination "
"array");
@@ -299,11 +299,20 @@ bool CopyAndConvertArrayToCppBuffer(Local<Array> src, T* dst,
}
}
+// Deprecated; to be removed.
template <const CTypeInfo* type_info, typename T>
inline bool V8_EXPORT TryCopyAndConvertArrayToCppBuffer(Local<Array> src,
T* dst,
uint32_t max_length) {
- return CopyAndConvertArrayToCppBuffer<type_info, T>(src, dst, max_length);
+ return CopyAndConvertArrayToCppBuffer<type_info->GetId(), T>(src, dst,
+ max_length);
+}
+
+template <CTypeInfo::Identifier type_info_id, typename T>
+inline bool V8_EXPORT TryToCopyAndConvertArrayToCppBuffer(Local<Array> src,
+ T* dst,
+ uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<type_info_id, T>(src, dst, max_length);
}
namespace internal {
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 8dcfe8a5a6..f79d0482ed 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -7,6 +7,7 @@
#include <algorithm> // For min
#include <cmath> // For isnan.
#include <limits>
+#include <sstream>
#include <string>
#include <utility> // For move
#include <vector>
@@ -107,7 +108,6 @@
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/tick-sample.h"
-#include "src/regexp/regexp-stack.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
@@ -407,7 +407,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
}
private:
- PageAllocator* page_allocator_ = internal::GetPlatformDataCagePageAllocator();
+ PageAllocator* page_allocator_ = internal::GetArrayBufferPageAllocator();
const size_t page_size_ = page_allocator_->AllocatePageSize();
};
@@ -947,7 +947,7 @@ void HandleScope::Initialize(Isolate* isolate) {
// We make an exception if the serializer is enabled, which means that the
// Isolate is exclusively used to create a snapshot.
Utils::ApiCheck(
- !v8::Locker::IsActive() ||
+ !v8::Locker::WasEverUsed() ||
internal_isolate->thread_manager()->IsLockedByCurrentThread() ||
internal_isolate->serializer_enabled(),
"HandleScope::HandleScope",
@@ -2533,7 +2533,7 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
!source->GetResourceOptions().IsModule(), "v8::ScriptCompiler::Compile",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
auto isolate = context->GetIsolate();
- auto maybe =
+ MaybeLocal<UnboundScript> maybe =
CompileUnboundInternal(isolate, source, options, no_cache_reason);
Local<UnboundScript> result;
if (!maybe.ToLocal(&result)) return MaybeLocal<Script>();
@@ -2550,11 +2550,10 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
- auto maybe =
+ MaybeLocal<UnboundScript> maybe =
CompileUnboundInternal(isolate, source, options, no_cache_reason);
Local<UnboundScript> unbound;
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
-
i::Handle<i::SharedFunctionInfo> shared = Utils::OpenHandle(*unbound);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
return ToApiHandle<Module>(i_isolate->factory()->NewSourceTextModule(shared));
@@ -3096,6 +3095,14 @@ MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ std::ostringstream stack_trace_stream;
+ i_isolate->PrintCurrentStackTrace(stack_trace_stream);
+ i::PrintF(out, "%s", stack_trace_stream.str().c_str());
+}
+
+void Message::PrintCurrentStackTrace(Isolate* isolate, std::ostream& out) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->PrintCurrentStackTrace(out);
}
@@ -5595,12 +5602,13 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
int end = start + length;
if ((length == -1) || (length > str->length() - start)) end = str->length();
if (end < 0) return 0;
- if (start < end) i::String::WriteToFlat(*str, buffer, start, end);
+ int write_length = end - start;
+ if (start < end) i::String::WriteToFlat(*str, buffer, start, write_length);
if (!(options & String::NO_NULL_TERMINATION) &&
- (length == -1 || end - start < length)) {
- buffer[end - start] = '\0';
+ (length == -1 || write_length < length)) {
+ buffer[write_length] = '\0';
}
- return end - start;
+ return write_length;
}
int String::WriteOneByte(Isolate* isolate, uint8_t* buffer, int start,
@@ -6080,9 +6088,17 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
#ifdef V8_VIRTUAL_MEMORY_CAGE
-PageAllocator* v8::V8::GetVirtualMemoryCageDataPageAllocator() {
+PageAllocator* v8::V8::GetVirtualMemoryCagePageAllocator() {
CHECK(i::GetProcessWideVirtualMemoryCage()->is_initialized());
- return i::GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
+ return i::GetProcessWideVirtualMemoryCage()->page_allocator();
+}
+
+size_t v8::V8::GetVirtualMemoryCageSizeInBytes() {
+ if (!i::GetProcessWideVirtualMemoryCage()->is_initialized()) {
+ return 0;
+ } else {
+ return i::GetProcessWideVirtualMemoryCage()->size();
+ }
}
#endif
@@ -6356,7 +6372,7 @@ void Context::DetachGlobal() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- isolate->bootstrapper()->DetachGlobal(context);
+ isolate->DetachGlobal(context);
}
Local<v8::Object> Context::GetExtrasBindingObject() {
@@ -7155,7 +7171,7 @@ REGEXP_FLAG_ASSERT_EQ(kLinear);
v8::RegExp::Flags v8::RegExp::GetFlags() const {
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return RegExp::Flags(static_cast<int>(obj->GetFlags()));
+ return RegExp::Flags(static_cast<int>(obj->flags()));
}
MaybeLocal<v8::Object> v8::RegExp::Exec(Local<Context> context,
@@ -9020,7 +9036,7 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
bool on_isolate_thread =
- v8::Locker::IsActive()
+ v8::Locker::WasEverUsed()
? isolate->thread_manager()->IsLockedByCurrentThread()
: i::ThreadId::Current() == isolate->thread_id();
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
@@ -9162,6 +9178,10 @@ CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback,
CALLBACK_SETTER(WasmExceptionsEnabledCallback, WasmExceptionsEnabledCallback,
wasm_exceptions_enabled_callback)
+CALLBACK_SETTER(WasmDynamicTieringEnabledCallback,
+ WasmDynamicTieringEnabledCallback,
+ wasm_dynamic_tiering_enabled_callback)
+
CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
SharedArrayBufferConstructorEnabledCallback,
sharedarraybuffer_constructor_enabled_callback)
@@ -9305,7 +9325,7 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#ifdef V8_INTL_SUPPORT
i_isolate->ResetDefaultLocale();
- i_isolate->ClearCachedIcuObjects();
+ i_isolate->clear_cached_icu_objects();
#endif // V8_INTL_SUPPORT
}
@@ -10418,16 +10438,44 @@ bool ConvertDouble(double d) {
} // namespace internal
-bool CopyAndConvertArrayToCppBufferInt32(Local<Array> src, int32_t* dst,
- uint32_t max_length) {
- return CopyAndConvertArrayToCppBuffer<&v8::kTypeInfoInt32, int32_t>(
- src, dst, max_length);
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<int32_t>::Build().GetId(), int32_t>(
+ Local<Array> src, int32_t* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kInt32, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ int32_t>(src, dst, max_length);
+}
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<uint32_t>::Build().GetId(), uint32_t>(
+ Local<Array> src, uint32_t* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kUint32, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ uint32_t>(src, dst, max_length);
}
-bool CopyAndConvertArrayToCppBufferFloat64(Local<Array> src, double* dst,
- uint32_t max_length) {
- return CopyAndConvertArrayToCppBuffer<&v8::kTypeInfoFloat64, double>(
- src, dst, max_length);
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<float>::Build().GetId(), float>(
+ Local<Array> src, float* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kFloat32, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ float>(src, dst, max_length);
+}
+
+template <>
+bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer<
+ internal::CTypeInfoBuilder<double>::Build().GetId(), double>(
+ Local<Array> src, double* dst, uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<
+ CTypeInfo(CTypeInfo::Type::kFloat64, CTypeInfo::SequenceType::kIsSequence)
+ .GetId(),
+ double>(src, dst, max_length);
}
} // namespace v8
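
An illustrative call into one of the specializations above; the wrapper name is hypothetical, the template arguments simply mirror the int32_t specialization, and the array is assumed to contain only numbers:

// Sketch: copy a JS array of numbers into a C++ buffer via the new
// explicit specializations. `array`, `out` and `capacity` come from the
// caller; the function returns false if conversion or bounds checks fail.
bool ReadInt32s(v8::Local<v8::Array> array, int32_t* out, uint32_t capacity) {
  return v8::TryToCopyAndConvertArrayToCppBuffer<
      v8::internal::CTypeInfoBuilder<int32_t>::Build().GetId(), int32_t>(
      array, out, capacity);
}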
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 09c520bbc0..b6743117fe 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -963,7 +963,6 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
if (Check('-')) {
negate = true;
}
- double dvalue = 0.0;
if (CheckForDouble(&dvalue)) {
info->kind = VarKind::kLocal;
info->type = AsmType::Float();
@@ -1671,9 +1670,9 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
uint32_t uvalue;
if (CheckForUnsignedBelow(0x100000, &uvalue)) {
if (Check('*')) {
- AsmType* a;
- RECURSEn(a = UnaryExpression());
- if (!a->IsA(AsmType::Int())) {
+ AsmType* type;
+ RECURSEn(type = UnaryExpression());
+ if (!type->IsA(AsmType::Int())) {
FAILn("Expected int");
}
int32_t value = static_cast<int32_t>(uvalue);
@@ -1689,9 +1688,9 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
int32_t value = -static_cast<int32_t>(uvalue);
current_function_builder_->EmitI32Const(value);
if (Check('*')) {
- AsmType* a;
- RECURSEn(a = UnaryExpression());
- if (!a->IsA(AsmType::Int())) {
+ AsmType* type;
+ RECURSEn(type = UnaryExpression());
+ if (!type->IsA(AsmType::Int())) {
FAILn("Expected int");
}
current_function_builder_->Emit(kExprI32Mul);
@@ -1707,7 +1706,6 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
}
for (;;) {
if (Check('*')) {
- uint32_t uvalue;
if (Check('-')) {
if (!PeekForZero() && CheckForUnsigned(&uvalue)) {
if (uvalue >= 0x100000) {
@@ -2115,7 +2113,7 @@ AsmType* AsmJsParser::ValidateCall() {
// both cases we might be seeing the {function_name} for the first time and
// hence allocate a {VarInfo} here, all subsequent uses of the same name then
// need to match the information stored at this point.
- base::Optional<TemporaryVariableScope> tmp;
+ base::Optional<TemporaryVariableScope> tmp_scope;
if (Check('[')) {
AsmType* index = nullptr;
RECURSEn(index = EqualityExpression());
@@ -2138,13 +2136,13 @@ AsmType* AsmJsParser::ValidateCall() {
if (module_builder_->NumTables() == 0) {
module_builder_->AddTable(kWasmFuncRef, 0);
}
- uint32_t index = module_builder_->IncreaseTableMinSize(0, mask + 1);
- if (index == std::numeric_limits<uint32_t>::max()) {
+ uint32_t func_index = module_builder_->IncreaseTableMinSize(0, mask + 1);
+ if (func_index == std::numeric_limits<uint32_t>::max()) {
FAILn("Exceeded maximum function table size");
}
function_info->kind = VarKind::kTable;
function_info->mask = mask;
- function_info->index = index;
+ function_info->index = func_index;
function_info->mutable_variable = false;
} else {
if (function_info->kind != VarKind::kTable) {
@@ -2157,8 +2155,8 @@ AsmType* AsmJsParser::ValidateCall() {
current_function_builder_->EmitI32Const(function_info->index);
current_function_builder_->Emit(kExprI32Add);
// We have to use a temporary for the correct order of evaluation.
- tmp.emplace(this);
- current_function_builder_->EmitSetLocal(tmp->get());
+ tmp_scope.emplace(this);
+ current_function_builder_->EmitSetLocal(tmp_scope->get());
// The position of function table calls is after the table lookup.
call_pos = scanner_.Position();
} else {
@@ -2394,7 +2392,7 @@ AsmType* AsmJsParser::ValidateCall() {
}
}
if (function_info->kind == VarKind::kTable) {
- current_function_builder_->EmitGetLocal(tmp->get());
+ current_function_builder_->EmitGetLocal(tmp_scope->get());
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitU32V(signature_index);
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index cf57b9e9b7..ac89df574d 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -529,9 +529,10 @@ int ArrayLiteral::InitDepthAndFlags() {
int array_index = 0;
for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
- MaterializedLiteral* literal = element->AsMaterializedLiteral();
- if (literal != nullptr) {
- int subliteral_depth = literal->InitDepthAndFlags() + 1;
+ MaterializedLiteral* materialized_literal =
+ element->AsMaterializedLiteral();
+ if (materialized_literal != nullptr) {
+ int subliteral_depth = materialized_literal->InitDepthAndFlags() + 1;
if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
}
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 6a68a80cdc..44f4ea155f 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -346,17 +346,12 @@ void CallPrinter::VisitAssignment(Assignment* node) {
Find(node->target());
if (node->target()->IsArrayLiteral()) {
// Special case the visit for destructuring array assignment.
- bool was_found = false;
if (node->value()->position() == position_) {
is_iterator_error_ = true;
was_found = !found_;
found_ = true;
}
Find(node->value(), true);
- if (was_found) {
- done_ = true;
- found_ = false;
- }
} else {
Find(node->value());
}
@@ -967,7 +962,7 @@ void AstPrinter::VisitWithStatement(WithStatement* node) {
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
- IndentedScope indent(this, "SWITCH", node->position());
+ IndentedScope switch_indent(this, "SWITCH", node->position());
PrintIndentedVisit("TAG", node->tag());
for (CaseClause* clause : *node->cases()) {
if (clause->is_default()) {
@@ -1247,7 +1242,7 @@ void AstPrinter::PrintObjectProperties(
void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- IndentedScope indent(this, "ARRAY LITERAL", node->position());
+ IndentedScope array_indent(this, "ARRAY LITERAL", node->position());
if (node->values()->length() > 0) {
IndentedScope indent(this, "VALUES", node->position());
for (int i = 0; i < node->values()->length(); i++) {
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 4ffc36a3a2..a61c43e14e 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -29,7 +29,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
// The following routine prints the node with position |position| into a
// string.
Handle<String> Print(FunctionLiteral* program, int position);
- enum ErrorHint {
+ enum class ErrorHint {
kNone,
kNormalIterator,
kAsyncIterator,
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 94782cab30..bf490a42bb 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -566,7 +566,6 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Check if there's a conflict with a lexical declaration
Scope* query_scope = sloppy_block_function->scope()->outer_scope();
- Variable* var = nullptr;
bool should_hoist = true;
// It is not sufficient to just do a Lookup on query_scope: for
@@ -576,7 +575,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Don't use a generic cache scope, as the cache scope would be the outer
// scope and we terminate the iteration there anyway.
do {
- var = query_scope->LookupInScopeOrScopeInfo(name, query_scope);
+ Variable* var = query_scope->LookupInScopeOrScopeInfo(name, query_scope);
if (var != nullptr && IsLexicalVariableMode(var->mode())) {
should_hoist = false;
break;
@@ -840,12 +839,12 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) {
new_parent->sibling_ = top_inner_scope_;
}
- Scope* outer_scope_ = outer_scope_and_calls_eval_.GetPointer();
- new_parent->unresolved_list_.MoveTail(&outer_scope_->unresolved_list_,
+ Scope* outer_scope = outer_scope_and_calls_eval_.GetPointer();
+ new_parent->unresolved_list_.MoveTail(&outer_scope->unresolved_list_,
top_unresolved_);
// Move temporaries allocated for complex parameter initializers.
- DeclarationScope* outer_closure = outer_scope_->GetClosureScope();
+ DeclarationScope* outer_closure = outer_scope->GetClosureScope();
for (auto it = top_local_; it != outer_closure->locals()->end(); ++it) {
Variable* local = *it;
DCHECK_EQ(VariableMode::kTemporary, local->mode());
@@ -2014,7 +2013,7 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
// scope when we get to it (we may still have deserialized scopes
// in-between the initial and cache scopes so we can't just check the
// cache before the loop).
- Variable* var = scope->variables_.Lookup(proxy->raw_name());
+ var = scope->variables_.Lookup(proxy->raw_name());
if (var != nullptr) return var;
}
var = scope->LookupInScopeInfo(proxy->raw_name(),
@@ -2063,7 +2062,7 @@ Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
// TODO(verwaest): Separate through AnalyzePartially.
if (mode == kParsedScope && !scope->scope_info_.is_null()) {
DCHECK_NULL(cache_scope);
- Scope* cache_scope = scope->GetNonEvalDeclarationScope();
+ cache_scope = scope->GetNonEvalDeclarationScope();
return Lookup<kDeserializedScope>(proxy, scope, outer_scope_end,
cache_scope);
}
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index 0143b179ff..e5f090682f 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -7,13 +7,14 @@
namespace v8 {
namespace base {
-BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
- Address start, size_t size,
- size_t allocate_page_size)
+BoundedPageAllocator::BoundedPageAllocator(
+ v8::PageAllocator* page_allocator, Address start, size_t size,
+ size_t allocate_page_size, PageInitializationMode page_initialization_mode)
: allocate_page_size_(allocate_page_size),
commit_page_size_(page_allocator->CommitPageSize()),
page_allocator_(page_allocator),
- region_allocator_(start, size, allocate_page_size_) {
+ region_allocator_(start, size, allocate_page_size_),
+ page_initialization_mode_(page_initialization_mode) {
DCHECK_NOT_NULL(page_allocator);
DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
@@ -110,16 +111,17 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
if (freed_size != size) return false;
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- // When the virtual memory cage is enabled, the pages returned by the
- // BoundedPageAllocator must be zero-initialized, as some of the additional
- // clients expect them to. Decommitting them during FreePages ensures that
- // while also changing the access permissions to kNoAccess.
- CHECK(page_allocator_->DecommitPages(raw_address, size));
-#else
- CHECK(page_allocator_->SetPermissions(raw_address, size,
- PageAllocator::kNoAccess));
-#endif
+ if (page_initialization_mode_ ==
+ PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
+ // When we are required to return zero-initialized pages, we decommit the
+ // pages here, which will cause any wired pages to be removed by the OS.
+ CHECK(page_allocator_->DecommitPages(raw_address, size));
+ } else {
+ DCHECK_EQ(page_initialization_mode_,
+ PageInitializationMode::kAllocatedPagesCanBeUninitialized);
+ CHECK(page_allocator_->SetPermissions(raw_address, size,
+ PageAllocator::kNoAccess));
+ }
return true;
}
@@ -152,14 +154,18 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
// Keep the region in "used" state just uncommit some pages.
Address free_address = address + new_size;
size_t free_size = size - new_size;
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- // See comment in FreePages().
- return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
- free_size);
-#else
- return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
- free_size, PageAllocator::kNoAccess);
-#endif
+ if (page_initialization_mode_ ==
+ PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
+ // See comment in FreePages().
+ return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
+ free_size);
+ } else {
+ DCHECK_EQ(page_initialization_mode_,
+ PageInitializationMode::kAllocatedPagesCanBeUninitialized);
+ return page_allocator_->SetPermissions(
+ reinterpret_cast<void*>(free_address), free_size,
+ PageAllocator::kNoAccess);
+ }
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index db364255f1..a98a2299f8 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -12,10 +12,23 @@
namespace v8 {
namespace base {
+// Defines the page initialization mode of a BoundedPageAllocator.
+enum class PageInitializationMode {
+ // The contents of allocated pages must be zero initialized. This causes any
+ // committed pages to be decommitted during FreePages and ReleasePages. This
+ // requires the embedder to provide the PageAllocator::DecommitPages API.
+ kAllocatedPagesMustBeZeroInitialized,
+ // Allocated pages do not have to be zero initialized and can contain old
+ // data. This is slightly faster as committed pages are not decommitted
+ // during FreePages and ReleasePages, but only made inaccessible.
+ kAllocatedPagesCanBeUninitialized,
+};
+
// This is a v8::PageAllocator implementation that allocates pages within the
// pre-reserved region of virtual space. This class requires the virtual space
// to be kept reserved during the lifetime of this object.
// The main applications of the bounded page allocator are
+// - the V8 virtual memory cage
// - V8 heap pointer compression which requires the whole V8 heap to be
// allocated within a contiguous range of virtual address space,
// - executable page allocation, which allows to use PC-relative 32-bit code
@@ -28,7 +41,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
using Address = uintptr_t;
BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
- size_t size, size_t allocate_page_size);
+ size_t size, size_t allocate_page_size,
+ PageInitializationMode page_initialization_mode);
BoundedPageAllocator(const BoundedPageAllocator&) = delete;
BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete;
~BoundedPageAllocator() override = default;
@@ -79,6 +93,7 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
const size_t commit_page_size_;
v8::PageAllocator* const page_allocator_;
v8::base::RegionAllocator region_allocator_;
+ const PageInitializationMode page_initialization_mode_;
};
} // namespace base
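
A sketch of how a caller inside V8 might construct the allocator with the new parameter; the helper name and the reservation inputs are illustrative, not part of the patch:

#include <cstdint>
#include <memory>
// Sketch only: construct a BoundedPageAllocator over an already-reserved
// region, opting into the zero-initialization mode. That mode requires the
// platform allocator to implement DecommitPages(), per the enum comment.
std::unique_ptr<v8::base::BoundedPageAllocator> MakeCagePageAllocator(
    v8::PageAllocator* platform_allocator, uintptr_t region_start,
    size_t region_size) {
  return std::make_unique<v8::base::BoundedPageAllocator>(
      platform_allocator, region_start, region_size,
      platform_allocator->AllocatePageSize(),
      v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
}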
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index fca0b2ebb2..3a73afc1ce 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -18,6 +18,11 @@
// This macro does nothing. That's all.
#define NOTHING(...)
+#define CONCAT_(a, b) a##b
+#define CONCAT(a, b) CONCAT_(a, b)
+// Creates a unique identifier. Useful for scopes to avoid shadowing names.
+#define UNIQUE_IDENTIFIER(base) CONCAT(base, __COUNTER__)
+
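
A small illustration of the macro's intent; TraceScopeSketch and TRACE_SCOPE_SKETCH are hypothetical helpers, and the exact generated names depend on earlier __COUNTER__ uses:

// Sketch: a macro that opens a scope object; UNIQUE_IDENTIFIER lets it be
// used twice in one block without the generated locals shadowing each other.
struct TraceScopeSketch {
  explicit TraceScopeSketch(const char* name) { (void)name; }
};
#define TRACE_SCOPE_SKETCH(name) \
  TraceScopeSketch UNIQUE_IDENTIFIER(trace_scope_)(name)
void TwoScopes() {
  TRACE_SCOPE_SKETCH("outer");  // expands to e.g. TraceScopeSketch trace_scope_0("outer");
  TRACE_SCOPE_SKETCH("inner");  // expands to e.g. TraceScopeSketch trace_scope_1("inner");
}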
// TODO(all) Replace all uses of this macro with C++'s offsetof. To do that, we
// have to make sure that only standard-layout types and simple field
// designators are used.
@@ -162,6 +167,13 @@ V8_INLINE Dest bit_cast(Source const& source) {
#endif
#endif
+// Define V8_USE_UNDEFINED_BEHAVIOR_SANITIZER macro.
+#if defined(__has_feature)
+#if __has_feature(undefined_behavior_sanitizer)
+#define V8_USE_UNDEFINED_BEHAVIOR_SANITIZER 1
+#endif
+#endif
+
// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
#define DISABLE_CFI_PERF V8_CLANG_NO_SANITIZE("cfi")
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index c51012c3f1..11dba08d79 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -134,8 +134,11 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
bool OS::DecommitPages(void* address, size_t size) {
- // TODO(chromium:1218005): support this.
- return false;
+ // We rely on DiscardSystemPages decommitting the pages immediately (via
+ // ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized
+ // should they be accessed again later on.
+ return SetPermissions(address, size, MemoryPermission::kNoAccess) &&
+ DiscardSystemPages(address, size);
}
// static
diff --git a/deps/v8/src/base/vlq.h b/deps/v8/src/base/vlq.h
index 96ee42cf6e..25dba27bfb 100644
--- a/deps/v8/src/base/vlq.h
+++ b/deps/v8/src/base/vlq.h
@@ -91,7 +91,7 @@ VLQDecodeUnsigned(GetNextFunction&& get_next) {
}
uint32_t bits = cur_byte & kDataMask;
for (int shift = kContinueShift; shift <= 32; shift += kContinueShift) {
- byte cur_byte = get_next();
+ cur_byte = get_next();
bits |= (cur_byte & kDataMask) << shift;
if (cur_byte <= kDataMask) break;
}
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index fb66139a31..249702bd62 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -12,6 +12,7 @@
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/compiler.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/fixed-array-inl.h"
diff --git a/deps/v8/src/baseline/bytecode-offset-iterator.cc b/deps/v8/src/baseline/bytecode-offset-iterator.cc
index bbedac8ef3..d2504b62e9 100644
--- a/deps/v8/src/baseline/bytecode-offset-iterator.cc
+++ b/deps/v8/src/baseline/bytecode-offset-iterator.cc
@@ -36,7 +36,7 @@ BytecodeOffsetIterator::BytecodeOffsetIterator(ByteArray mapping_table,
bytecode_iterator_(Handle<BytecodeArray>(
reinterpret_cast<Address*>(&bytecode_handle_storage_))),
local_heap_(nullptr) {
- no_gc.emplace();
+ no_gc_.emplace();
Initialize();
}
diff --git a/deps/v8/src/baseline/bytecode-offset-iterator.h b/deps/v8/src/baseline/bytecode-offset-iterator.h
index 6e78fba061..9581a2a1f4 100644
--- a/deps/v8/src/baseline/bytecode-offset-iterator.h
+++ b/deps/v8/src/baseline/bytecode-offset-iterator.h
@@ -88,7 +88,7 @@ class V8_EXPORT_PRIVATE BytecodeOffsetIterator {
BytecodeArray bytecode_handle_storage_;
interpreter::BytecodeArrayIterator bytecode_iterator_;
LocalHeap* local_heap_;
- base::Optional<DisallowGarbageCollection> no_gc;
+ base::Optional<DisallowGarbageCollection> no_gc_;
};
} // namespace baseline
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 663462fdb5..7bf6bd2f4e 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -434,7 +434,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
reg, Operand(int64_t(num_labels)));
int64_t imm64;
imm64 = __ branch_long_offset(&table);
- DCHECK(is_int32(imm64));
+ CHECK(is_int32(imm64 + 0x800));
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
__ auipc(t6, Hi20); // Read PC + Hi20 into t6
diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h
index 47159d0bf4..28df2936ac 100644
--- a/deps/v8/src/bigint/bigint.h
+++ b/deps/v8/src/bigint/bigint.h
@@ -227,12 +227,40 @@ void Add(RWDigits Z, Digits X, Digits Y);
// Addition of signed integers. Returns true if the result is negative.
bool AddSigned(RWDigits Z, Digits X, bool x_negative, Digits Y,
bool y_negative);
+// Z := X + 1
+void AddOne(RWDigits Z, Digits X);
// Z := X - Y. Requires X >= Y.
void Subtract(RWDigits Z, Digits X, Digits Y);
// Subtraction of signed integers. Returns true if the result is negative.
bool SubtractSigned(RWDigits Z, Digits X, bool x_negative, Digits Y,
bool y_negative);
+// Z := X - 1
+void SubtractOne(RWDigits Z, Digits X);
+
+// The bitwise operations assume that negative BigInts are represented as
+// sign+magnitude. Their behavior depends on the sign of the inputs: negative
+// inputs perform an implicit conversion to two's complement representation.
+// Z := X & Y
+void BitwiseAnd_PosPos(RWDigits Z, Digits X, Digits Y);
+// Call this for a BigInt x = (magnitude=X, negative=true).
+void BitwiseAnd_NegNeg(RWDigits Z, Digits X, Digits Y);
+// Positive X, negative Y. Callers must swap arguments as needed.
+void BitwiseAnd_PosNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseOr_PosPos(RWDigits Z, Digits X, Digits Y);
+void BitwiseOr_NegNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseXor_PosPos(RWDigits Z, Digits X, Digits Y);
+void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y);
+void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y);
+
+// Z := (least significant n bits of X, interpreted as a signed n-bit integer).
+// Returns true if the result is negative; Z will hold the absolute value.
+bool AsIntN(RWDigits Z, Digits X, bool x_negative, int n);
+// Z := (least significant n bits of X).
+void AsUintN_Pos(RWDigits Z, Digits X, int n);
+// Same, but X is the absolute value of a negative BigInt.
+void AsUintN_Neg(RWDigits Z, Digits X, int n);
enum class Status { kOk, kInterrupted };
@@ -303,6 +331,36 @@ int ToStringResultLength(Digits X, int radix, bool sign);
// In DEBUG builds, the result of {ToString} will be initialized to this value.
constexpr char kStringZapValue = '?';
+inline int BitwiseAnd_PosPos_ResultLength(int x_length, int y_length) {
+ return std::min(x_length, y_length);
+}
+inline int BitwiseAnd_NegNeg_ResultLength(int x_length, int y_length) {
+ // Result length growth example: -2 & -3 = -4 (2-bit inputs, 3-bit result).
+ return std::max(x_length, y_length) + 1;
+}
+inline int BitwiseAnd_PosNeg_ResultLength(int x_length) { return x_length; }
+inline int BitwiseOrResultLength(int x_length, int y_length) {
+ return std::max(x_length, y_length);
+}
+inline int BitwiseXor_PosPos_ResultLength(int x_length, int y_length) {
+ return std::max(x_length, y_length);
+}
+inline int BitwiseXor_NegNeg_ResultLength(int x_length, int y_length) {
+ return std::max(x_length, y_length);
+}
+inline int BitwiseXor_PosNeg_ResultLength(int x_length, int y_length) {
+ // Result length growth example: 3 ^ -1 == -4 (2-bit inputs, 3-bit result).
+ return std::max(x_length, y_length) + 1;
+}
+
+// Returns -1 if this "asIntN" operation would be a no-op.
+int AsIntNResultLength(Digits X, bool x_negative, int n);
+// Returns -1 if this "asUintN" operation would be a no-op.
+int AsUintN_Pos_ResultLength(Digits X, int n);
+inline int AsUintN_Neg_ResultLength(int n) {
+ return ((n - 1) / kDigitBits) + 1;
+}
+
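
As a sanity check on the sign+magnitude identities these result-length helpers assume, here is a small sketch over ordinary machine integers; it is illustrative only and does not use the bigint library:

#include <cassert>
#include <cstdint>
// Sketch: verify the identity behind BitwiseAnd_NegNeg on ordinary
// two's-complement integers, where x and y are the positive magnitudes:
//   (-x) & (-y) == -(((x - 1) | (y - 1)) + 1)
void CheckAndNegNegIdentity(int64_t x, int64_t y) {
  assert(x > 0 && y > 0);
  int64_t via_machine = (-x) & (-y);
  int64_t via_identity = -(((x - 1) | (y - 1)) + 1);
  assert(via_machine == via_identity);  // e.g. x = 2, y = 3 gives -4 both ways.
}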
// Support for parsing BigInts from Strings, using an Accumulator object
// for intermediate state.
diff --git a/deps/v8/src/bigint/bitwise.cc b/deps/v8/src/bigint/bitwise.cc
new file mode 100644
index 0000000000..087847c118
--- /dev/null
+++ b/deps/v8/src/bigint/bitwise.cc
@@ -0,0 +1,262 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bigint/bigint-internal.h"
+#include "src/bigint/digit-arithmetic.h"
+#include "src/bigint/util.h"
+#include "src/bigint/vector-arithmetic.h"
+
+namespace v8 {
+namespace bigint {
+
+void BitwiseAnd_PosPos(RWDigits Z, Digits X, Digits Y) {
+ int pairs = std::min(X.len(), Y.len());
+ DCHECK(Z.len() >= pairs);
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] & Y[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseAnd_NegNeg(RWDigits Z, Digits X, Digits Y) {
+ // (-x) & (-y) == ~(x-1) & ~(y-1)
+ // == ~((x-1) | (y-1))
+ // == -(((x-1) | (y-1)) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t x_borrow = 1;
+ digit_t y_borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) {
+ Z[i] = digit_sub(X[i], x_borrow, &x_borrow) |
+ digit_sub(Y[i], y_borrow, &y_borrow);
+ }
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow);
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow);
+ DCHECK(x_borrow == 0); // NOLINT(readability/check)
+ DCHECK(y_borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+void BitwiseAnd_PosNeg(RWDigits Z, Digits X, Digits Y) {
+ // x & (-y) == x & ~(y-1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] & ~digit_sub(Y[i], borrow, &borrow);
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseOr_PosPos(RWDigits Z, Digits X, Digits Y) {
+ int pairs = std::min(X.len(), Y.len());
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] | Y[i];
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Y.len(); i++) Z[i] = Y[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseOr_NegNeg(RWDigits Z, Digits X, Digits Y) {
+ // (-x) | (-y) == ~(x-1) | ~(y-1)
+ // == ~((x-1) & (y-1))
+ // == -(((x-1) & (y-1)) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t x_borrow = 1;
+ digit_t y_borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) {
+ Z[i] = digit_sub(X[i], x_borrow, &x_borrow) &
+ digit_sub(Y[i], y_borrow, &y_borrow);
+ }
+ // Any leftover borrows don't matter, the '&' would drop them anyway.
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y) {
+ // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = digit_sub(Y[i], borrow, &borrow) & ~X[i];
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow);
+ DCHECK(borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+void BitwiseXor_PosPos(RWDigits Z, Digits X, Digits Y) {
+ int pairs = X.len();
+ if (Y.len() < X.len()) {
+ std::swap(X, Y);
+ pairs = X.len();
+ }
+ DCHECK(X.len() <= Y.len());
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] ^ Y[i];
+ for (; i < Y.len(); i++) Z[i] = Y[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y) {
+ // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t x_borrow = 1;
+ digit_t y_borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) {
+ Z[i] = digit_sub(X[i], x_borrow, &x_borrow) ^
+ digit_sub(Y[i], y_borrow, &y_borrow);
+ }
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow);
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow);
+ DCHECK(x_borrow == 0); // NOLINT(readability/check)
+ DCHECK(y_borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y) {
+ // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
+ int pairs = std::min(X.len(), Y.len());
+ digit_t borrow = 1;
+ int i = 0;
+ for (; i < pairs; i++) Z[i] = X[i] ^ digit_sub(Y[i], borrow, &borrow);
+ // (At least) one of the next two loops will perform zero iterations:
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow);
+ DCHECK(borrow == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+ Add(Z, 1);
+}
+
+namespace {
+
+// Z := (least significant n bits of X).
+void TruncateToNBits(RWDigits Z, Digits X, int n) {
+ int digits = DIV_CEIL(n, kDigitBits);
+ int bits = n % kDigitBits;
+ // Copy all digits except the MSD.
+ int last = digits - 1;
+ for (int i = 0; i < last; i++) {
+ Z[i] = X[i];
+ }
+ // The MSD might contain extra bits that we don't want.
+ digit_t msd = X[last];
+ if (bits != 0) {
+ int drop = kDigitBits - bits;
+ msd = (msd << drop) >> drop;
+ }
+ Z[last] = msd;
+}
+
+// Z := 2**n - (least significant n bits of X).
+void TruncateAndSubFromPowerOfTwo(RWDigits Z, Digits X, int n) {
+ int digits = DIV_CEIL(n, kDigitBits);
+ int bits = n % kDigitBits;
+ // Process all digits except the MSD. Take X's digits, then simulate leading
+ // zeroes.
+ int last = digits - 1;
+ int have_x = std::min(last, X.len());
+ digit_t borrow = 0;
+ int i = 0;
+ for (; i < have_x; i++) Z[i] = digit_sub2(0, X[i], borrow, &borrow);
+ for (; i < last; i++) Z[i] = digit_sub(0, borrow, &borrow);
+
+ // The MSD might contain extra bits that we don't want.
+ digit_t msd = last < X.len() ? X[last] : 0;
+ if (bits == 0) {
+ Z[last] = digit_sub2(0, msd, borrow, &borrow);
+ } else {
+ int drop = kDigitBits - bits;
+ msd = (msd << drop) >> drop;
+ digit_t minuend_msd = static_cast<digit_t>(1) << bits;
+ digit_t result_msd = digit_sub2(minuend_msd, msd, borrow, &borrow);
+ DCHECK(borrow == 0); // result < 2^n. NOLINT(readability/check)
+ // If all subtracted bits were zero, we have to get rid of the
+ // materialized minuend_msd again.
+ Z[last] = result_msd & (minuend_msd - 1);
+ }
+}
+
+} // namespace
+
+// Returns -1 when the operation would return X unchanged.
+int AsIntNResultLength(Digits X, bool x_negative, int n) {
+ int needed_digits = DIV_CEIL(n, kDigitBits);
+ // Generally: decide based on number of digits, and bits in the top digit.
+ if (X.len() < needed_digits) return -1;
+ if (X.len() > needed_digits) return needed_digits;
+ digit_t top_digit = X[needed_digits - 1];
+ digit_t compare_digit = digit_t{1} << ((n - 1) % kDigitBits);
+ if (top_digit < compare_digit) return -1;
+ if (top_digit > compare_digit) return needed_digits;
+ // Special case: if X == -2**(n-1), truncation is a no-op.
+ if (!x_negative) return needed_digits;
+ for (int i = needed_digits - 2; i >= 0; i--) {
+ if (X[i] != 0) return needed_digits;
+ }
+ return -1;
+}
+
+bool AsIntN(RWDigits Z, Digits X, bool x_negative, int n) {
+ DCHECK(X.len() > 0); // NOLINT(readability/check)
+ DCHECK(n > 0); // NOLINT(readability/check)
+ // NOLINTNEXTLINE(readability/check)
+ DCHECK(AsIntNResultLength(X, x_negative, n) > 0);
+ int needed_digits = DIV_CEIL(n, kDigitBits);
+ digit_t top_digit = X[needed_digits - 1];
+ digit_t compare_digit = digit_t{1} << ((n - 1) % kDigitBits);
+ // The canonical algorithm would be: convert negative numbers to two's
+ // complement representation, truncate, convert back to sign+magnitude. To
+ // avoid the conversions, we predict what the result would be:
+ // When the (n-1)th bit is not set:
+ // - truncate the absolute value
+ // - preserve the sign.
+ // When the (n-1)th bit is set:
+ // - subtract the truncated absolute value from 2**n to simulate two's
+ // complement representation
+ // - flip the sign, unless it's the special case where the input is negative
+ // and the result is the minimum n-bit integer. E.g. asIntN(3, -12) => -4.
+ bool has_bit = (top_digit & compare_digit) == compare_digit;
+ if (!has_bit) {
+ TruncateToNBits(Z, X, n);
+ return x_negative;
+ }
+ TruncateAndSubFromPowerOfTwo(Z, X, n);
+ if (!x_negative) return true; // Result is negative.
+ // Scan for the special case (see above): if all bits below the (n-1)th
+ // bit are zero, the result is negative.
+ if ((top_digit & (compare_digit - 1)) != 0) return false;
+ for (int i = needed_digits - 2; i >= 0; i--) {
+ if (X[i] != 0) return false;
+ }
+ return true;
+}
+
+// Returns -1 when the operation would return X unchanged.
+int AsUintN_Pos_ResultLength(Digits X, int n) {
+ int needed_digits = DIV_CEIL(n, kDigitBits);
+ if (X.len() < needed_digits) return -1;
+ if (X.len() > needed_digits) return needed_digits;
+ int bits_in_top_digit = n % kDigitBits;
+ if (bits_in_top_digit == 0) return -1;
+ digit_t top_digit = X[needed_digits - 1];
+ if ((top_digit >> bits_in_top_digit) == 0) return -1;
+ return needed_digits;
+}
+
+void AsUintN_Pos(RWDigits Z, Digits X, int n) {
+ DCHECK(AsUintN_Pos_ResultLength(X, n) > 0); // NOLINT(readability/check)
+ TruncateToNBits(Z, X, n);
+}
+
+void AsUintN_Neg(RWDigits Z, Digits X, int n) {
+ TruncateAndSubFromPowerOfTwo(Z, X, n);
+}
+
+} // namespace bigint
+} // namespace v8
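
A short sketch replaying the asIntN(3, -12) => -4 example from the AsIntN comment above with plain two's-complement arithmetic; it is illustrative only and does not call into the bigint library:

#include <cassert>
#include <cstdint>
// Sketch: truncate -12 to its least significant 3 bits, then sign-extend.
void CheckAsIntN3OfMinus12() {
  int64_t x = -12;
  int64_t low_bits = x & 0b111;                          // == 4
  int64_t result = (low_bits >= 4) ? low_bits - 8 : low_bits;
  assert(result == -4);  // matches the documented asIntN(3, -12) => -4
}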
diff --git a/deps/v8/src/bigint/mul-fft.cc b/deps/v8/src/bigint/mul-fft.cc
index a3971a7276..9c297c00df 100644
--- a/deps/v8/src/bigint/mul-fft.cc
+++ b/deps/v8/src/bigint/mul-fft.cc
@@ -144,7 +144,7 @@ void ShiftModFn_Large(digit_t* result, const digit_t* input, int digit_shift,
result[digit_shift] = digit_sub(sum, i0_part, &borrow);
input_carry = d >> (kDigitBits - bits_shift);
if (digit_shift + 1 < K) {
- digit_t d = input[1];
+ d = input[1];
digit_t subtrahend = (d << bits_shift) | input_carry;
result[digit_shift + 1] =
digit_sub2(iK_carry, subtrahend, borrow, &borrow);
diff --git a/deps/v8/src/bigint/vector-arithmetic.cc b/deps/v8/src/bigint/vector-arithmetic.cc
index 4191755bc9..9bbea3873e 100644
--- a/deps/v8/src/bigint/vector-arithmetic.cc
+++ b/deps/v8/src/bigint/vector-arithmetic.cc
@@ -118,5 +118,22 @@ bool SubtractSigned(RWDigits Z, Digits X, bool x_negative, Digits Y,
return !x_negative;
}
+void AddOne(RWDigits Z, Digits X) {
+ digit_t carry = 1;
+ int i = 0;
+ for (; carry > 0 && i < X.len(); i++) Z[i] = digit_add2(X[i], carry, &carry);
+ if (carry > 0) Z[i++] = carry;
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+void SubtractOne(RWDigits Z, Digits X) {
+ digit_t borrow = 1;
+ int i = 0;
+ for (; borrow > 0; i++) Z[i] = digit_sub(X[i], borrow, &borrow);
+ for (; i < X.len(); i++) Z[i] = X[i];
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
} // namespace bigint
} // namespace v8
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 1ef63e1096..00f57bcbff 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -670,6 +670,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
+ USE(pushed_stack_space);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
// Unlink this frame from the handler chain.
@@ -2282,13 +2283,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertFunction(r1);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
- Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
- __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ b(ne, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2363,14 +2358,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldrh(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ push(r1);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2480,34 +2467,48 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r0 : the number of arguments
// -- r1 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(r1, &non_callable);
- __ bind(&non_smi);
- __ LoadMap(r4, r1);
- __ CompareInstanceTypeRange(r4, r5, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = r0;
+ Register target = r1;
+ Register map = r4;
+ Register instance_type = r5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, ls);
- __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(Map::Bits1::IsCallableBit::kMask));
- __ b(eq, &non_callable);
+ {
+ Register flags = r4;
+ __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ tst(flags, Operand(Map::Bits1::IsCallableBit::kMask));
+ __ b(eq, &non_callable);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ cmp(r5, Operand(JS_PROXY_TYPE));
+ __ cmp(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ b(eq, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ str(r1, __ ReceiverOperand(r0));
+ __ str(target, __ ReceiverOperand(argc));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(r1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2516,8 +2517,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
@@ -2582,31 +2593,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = r0;
+ Register target = r1;
+ Register map = r4;
+ Register instance_type = r5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(r1, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r2, Operand(Map::Bits1::IsConstructorBit::kMask));
- __ b(eq, &non_constructor);
+ __ ldr(map, FieldMemOperand(target, HeapObject::kMapOffset));
+ {
+ Register flags = r2;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(flags, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ b(eq, &non_constructor);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(r4, r5, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, ls);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ cmp(r5, Operand(JS_PROXY_TYPE));
+ __ cmp(instance_type, Operand(JS_PROXY_TYPE));
__ b(ne, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2615,9 +2635,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ str(r1, __ ReceiverOperand(r0));
+ __ str(target, __ ReceiverOperand(argc));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(r1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index ac34e17354..27d13ecb46 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -2648,14 +2648,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertFunction(x1);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that function is not a "classConstructor".
- Label class_constructor;
__ LoadTaggedPointerField(
x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
- __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
- &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2664,6 +2658,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
FieldMemOperand(x1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
+ __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w3,
SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask,
@@ -2730,15 +2725,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ldrh(x2,
FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ Bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ PushArgument(x1);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- __ Unreachable();
- }
}
namespace {
@@ -2905,35 +2891,49 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- x0 : the number of arguments
// -- x1 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(x1, &non_callable);
- __ Bind(&non_smi);
- __ LoadMap(x4, x1);
- __ CompareInstanceTypeRange(x4, x5, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = x0;
+ Register target = x1;
+ Register map = x4;
+ Register instance_type = x5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, ls);
- __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x4, Map::Bits1::IsCallableBit::kMask,
- &non_callable);
+ {
+ Register flags = x4;
+ __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ TestAndBranchIfAllClear(flags, Map::Bits1::IsCallableBit::kMask,
+ &non_callable);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ Cmp(x5, JS_PROXY_TYPE);
+ __ Cmp(instance_type, JS_PROXY_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE);
+ __ B(eq, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ Poke(x1, __ ReceiverOperand(x0));
+ __ Poke(target, __ ReceiverOperand(argc));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(x1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2942,10 +2942,19 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ PushArgument(x1);
+ __ PushArgument(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
__ Unreachable();
}
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ PushArgument(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Unreachable();
+ }
}
// static
@@ -3016,31 +3025,41 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = x0;
+ Register target = x1;
+ Register map = x4;
+ Register instance_type = x5;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(x1, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
- __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x2, Map::Bits1::IsConstructorBit::kMask,
- &non_constructor);
+ __ LoadTaggedPointerField(map,
+ FieldMemOperand(target, HeapObject::kMapOffset));
+ {
+ Register flags = x2;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(flags, Map::Bits1::IsConstructorBit::kMask,
+ &non_constructor);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(x4, x5, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, ls);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ Cmp(x5, JS_PROXY_TYPE);
+ __ Cmp(instance_type, JS_PROXY_TYPE);
__ B(ne, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -3049,10 +3068,11 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ Poke(x1, __ ReceiverOperand(x0));
+ __ Poke(target, __ ReceiverOperand(argc));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(x1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -3774,8 +3794,11 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register js_getter = x4;
__ LoadTaggedPointerField(
js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ Ldr(api_function_address,
- FieldMemOperand(js_getter, Foreign::kForeignAddressOffset));
+
+ __ LoadExternalPointerField(
+ api_function_address,
+ FieldMemOperand(js_getter, Foreign::kForeignAddressOffset),
+ kForeignForeignAddressTag);
const int spill_offset = 1 + kApiStackSpace;
// +3 is to skip prolog, return address and name handle.
diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq
index 1add88fa6a..bd892a2e76 100644
--- a/deps/v8/src/builtins/array-filter.tq
+++ b/deps/v8/src/builtins/array-filter.tq
@@ -97,7 +97,7 @@ transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
transitioning macro FastArrayFilter(implicit context: Context)(
fastO: FastJSArray, len: Smi, callbackfn: Callable, thisArg: JSAny,
- output: FastJSArray) labels
+ output: FastJSArray): void labels
Bailout(Number, Number) {
let k: Smi = 0;
let to: Smi = 0;
diff --git a/deps/v8/src/builtins/array-from.tq b/deps/v8/src/builtins/array-from.tq
index e139e58de6..5fcdefccc3 100644
--- a/deps/v8/src/builtins/array-from.tq
+++ b/deps/v8/src/builtins/array-from.tq
@@ -79,7 +79,7 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
// memory, e.g. a proxy that discarded the values. Ignoring this case
// just means we would repeatedly call CreateDataProperty with index =
// 2^53
- assert(k < kMaxSafeInteger);
+ dcheck(k < kMaxSafeInteger);
// ii. Let Pk be ! ToString(k).
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 6448c95875..12988af2a2 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -55,7 +55,7 @@ LoadJoinElement<array::FastDoubleElements>(
builtin LoadJoinTypedElement<T : type extends ElementsKind>(
context: Context, receiver: JSReceiver, k: uintptr): JSAny {
const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
- assert(!IsDetachedBuffer(typedArray.buffer));
+ dcheck(!IsDetachedBuffer(typedArray.buffer));
return typed_array::LoadFixedTypedArrayElementAsTagged(
typedArray.data_ptr, k, typed_array::KindForArrayType<T>());
}
@@ -126,14 +126,14 @@ macro AddStringLength(implicit context: Context)(
macro StoreAndGrowFixedArray<T: type>(
fixedArray: FixedArray, index: intptr, element: T): FixedArray {
const length: intptr = fixedArray.length_intptr;
- assert(index <= length);
+ dcheck(index <= length);
if (index < length) {
fixedArray.objects[index] = element;
return fixedArray;
} else
deferred {
const newLength: intptr = CalculateNewElementsCapacity(length);
- assert(index < newLength);
+ dcheck(index < newLength);
const newfixedArray: FixedArray =
ExtractFixedArray(fixedArray, 0, length, newLength);
newfixedArray.objects[index] = element;
@@ -147,7 +147,7 @@ macro StoreAndGrowFixedArray<T: type>(
// Buffer.AddSeparators().
struct Buffer {
macro Add(implicit context: Context)(
- str: String, nofSeparators: intptr, separatorLength: intptr) {
+ str: String, nofSeparators: intptr, separatorLength: intptr): void {
// Add separators if necessary (at the beginning or more than one)
const writeSeparators: bool = this.index == 0 | nofSeparators > 1;
this.AddSeparators(nofSeparators, separatorLength, writeSeparators);
@@ -161,7 +161,7 @@ struct Buffer {
}
macro AddSeparators(implicit context: Context)(
- nofSeparators: intptr, separatorLength: intptr, write: bool) {
+ nofSeparators: intptr, separatorLength: intptr, write: bool): void {
if (nofSeparators == 0 || separatorLength == 0) return;
const nofSeparatorsInt: intptr = nofSeparators;
@@ -211,7 +211,7 @@ macro NewBuffer(len: uintptr, sep: String): Buffer {
const cappedBufferSize: intptr = len > kMaxNewSpaceFixedArrayElements ?
kMaxNewSpaceFixedArrayElements :
Signed(len);
- assert(cappedBufferSize > 0);
+ dcheck(cappedBufferSize > 0);
return Buffer{
fixedArray: AllocateZeroedFixedArray(cappedBufferSize),
index: 0,
@@ -222,7 +222,7 @@ macro NewBuffer(len: uintptr, sep: String): Buffer {
macro BufferJoin(implicit context: Context)(
buffer: Buffer, sep: String): String {
- assert(IsValidPositiveSmi(buffer.totalStringLength));
+ dcheck(IsValidPositiveSmi(buffer.totalStringLength));
if (buffer.totalStringLength == 0) return kEmptyString;
// Fast path when there's only one buffer element.
@@ -504,7 +504,8 @@ builtin JoinStackPop(implicit context: Context)(
}
// Fast path the common non-nested calls.
-macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver) {
+macro JoinStackPopInline(implicit context: Context)(receiver: JSReceiver):
+ void {
const stack: FixedArray = LoadJoinStack()
otherwise unreachable;
const len: intptr = stack.length_intptr;
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index fe416fa4a2..912b43abed 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -44,7 +44,7 @@ macro FastArrayLastIndexOf<Elements : type extends FixedArrayBase>(
const same: Boolean = StrictEqual(searchElement, element);
if (same == True) {
- assert(Is<FastJSArray>(array));
+ dcheck(Is<FastJSArray>(array));
return k;
}
} label Hole {} // Do nothing for holes.
@@ -52,7 +52,7 @@ macro FastArrayLastIndexOf<Elements : type extends FixedArrayBase>(
--k;
}
- assert(Is<FastJSArray>(array));
+ dcheck(Is<FastJSArray>(array));
return -1;
}
@@ -90,7 +90,7 @@ macro TryFastArrayLastIndexOf(
return FastArrayLastIndexOf<FixedArray>(
context, array, fromSmi, searchElement);
}
- assert(IsDoubleElementsKind(kind));
+ dcheck(IsDoubleElementsKind(kind));
return FastArrayLastIndexOf<FixedDoubleArray>(
context, array, fromSmi, searchElement);
}
diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq
index 48c8f87681..1958d1eb59 100644
--- a/deps/v8/src/builtins/array-map.tq
+++ b/deps/v8/src/builtins/array-map.tq
@@ -97,13 +97,13 @@ transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
}
struct Vector {
- macro ReportSkippedElement() {
+ macro ReportSkippedElement(): void {
this.skippedElements = true;
}
macro CreateJSArray(implicit context: Context)(validLength: Smi): JSArray {
const length: Smi = this.fixedArray.length;
- assert(validLength <= length);
+ dcheck(validLength <= length);
let kind: ElementsKind = ElementsKind::PACKED_SMI_ELEMENTS;
if (!this.onlySmis) {
if (this.onlyNumbers) {
@@ -153,7 +153,8 @@ struct Vector {
return a;
}
- macro StoreResult(implicit context: Context)(index: Smi, result: JSAny) {
+ macro StoreResult(implicit context: Context)(
+ index: Smi, result: JSAny): void {
typeswitch (result) {
case (s: Smi): {
this.fixedArray.objects[index] = s;
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index b154483d06..69a678a513 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -27,23 +27,24 @@ LoadElement<array::FastPackedDoubleElements, float64>(
}
macro StoreElement<ElementsAccessor : type extends ElementsKind, T: type>(
- implicit context: Context)(elements: FixedArrayBase, index: Smi, value: T);
+ implicit context: Context)(
+ elements: FixedArrayBase, index: Smi, value: T): void;
StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: Smi) {
+ elements: FixedArrayBase, index: Smi, value: Smi): void {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
StoreFixedArrayElement(elems, index, value);
}
StoreElement<array::FastPackedObjectElements, JSAny>(implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: JSAny) {
+ elements: FixedArrayBase, index: Smi, value: JSAny): void {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
elements.objects[index] = value;
}
StoreElement<array::FastPackedDoubleElements, float64>(
implicit context: Context)(
- elements: FixedArrayBase, index: Smi, value: float64) {
+ elements: FixedArrayBase, index: Smi, value: float64): void {
const elems: FixedDoubleArray = UnsafeCast<FixedDoubleArray>(elements);
StoreFixedDoubleArrayElement(elems, index, value);
}
@@ -52,7 +53,7 @@ StoreElement<array::FastPackedDoubleElements, float64>(
// whether a property is present, so we can simply swap them using fast
// FixedArray loads/stores.
macro FastPackedArrayReverse<Accessor: type, T: type>(
- implicit context: Context)(elements: FixedArrayBase, length: Smi) {
+ implicit context: Context)(elements: FixedArrayBase, length: Smi): void {
let lower: Smi = 0;
let upper: Smi = length - 1;
@@ -138,8 +139,8 @@ transitioning macro GenericArrayReverse(
return object;
}
-macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny)
- labels Slow {
+macro TryFastPackedArrayReverse(implicit context: Context)(receiver: JSAny):
+ void labels Slow {
const array: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
const kind: ElementsKind = array.map.elements_kind;
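Illustrative aside (not part of the patch): FastPackedArrayReverse above relies on packed element kinds guaranteeing that every slot is present, so the reverse reduces to a lower/upper index swap with no hole checks. A small self-contained C++ sketch of the same shape, using a plain vector rather than V8's FixedArray:

    #include <cstdio>
    #include <utility>
    #include <vector>

    // In-place reverse with a lower/upper index pair; no per-element presence
    // checks are needed because every slot is known to hold a value.
    void ReversePacked(std::vector<int>& elements) {
      if (elements.empty()) return;
      std::size_t lower = 0;
      std::size_t upper = elements.size() - 1;
      while (lower < upper) {
        std::swap(elements[lower], elements[upper]);
        ++lower;
        --upper;
      }
    }

    int main() {
      std::vector<int> v = {1, 2, 3, 4, 5};
      ReversePacked(v);
      for (int x : v) std::printf("%d ", x);  // prints: 5 4 3 2 1
      std::printf("\n");
      return 0;
    }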
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index 435431f49d..f5a644ef40 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -89,7 +89,7 @@ macro HandleFastSlice(
labels Bailout {
const start: Smi = Cast<Smi>(startNumber) otherwise Bailout;
const count: Smi = Cast<Smi>(countNumber) otherwise Bailout;
- assert(start >= 0);
+ dcheck(start >= 0);
try {
typeswitch (o) {
@@ -130,17 +130,6 @@ macro HandleFastSlice(
transitioning javascript builtin
ArrayPrototypeSlice(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
- // Handle array cloning case if the receiver is a fast array.
- if (arguments.length == 0) {
- typeswitch (receiver) {
- case (a: FastJSArrayForCopy): {
- return CloneFastJSArray(context, a);
- }
- case (JSAny): {
- }
- }
- }
-
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -161,6 +150,30 @@ ArrayPrototypeSlice(
const end: JSAny = arguments[1];
const relativeEnd: Number = end == Undefined ? len : ToInteger_Inline(end);
+ // Handle array cloning case if the receiver is a fast array. In the case
+ // where relativeStart is 0 but start is not the SMI zero (e.g., start is an
+ // object whose valueOf returns 0) we must not call CloneFastJSArray. This is
+ // because CloneFastJSArray reloads the array length, and the ToInteger above
+ // might have called user code which changed it. Thus, calling
+ // CloneFastJSArray here is safe only if we know ToInteger didn't call user
+ // code.
+
+ // This logic should be in sync with ArrayPrototypeSlice (to a reasonable
+ // degree). This is because CloneFastJSArray produces arrays which are
+ // potentially COW. If there's a discrepancy, TF generates code which produces
+ // a COW array and then expects it to be non-COW (or the other way around) ->
+ // immediate deopt.
+ if ((start == Undefined || TaggedEqual(start, SmiConstant(0))) &&
+ end == Undefined) {
+ typeswitch (receiver) {
+ case (a: FastJSArrayForCopy): {
+ return CloneFastJSArray(context, a);
+ }
+ case (JSAny): {
+ }
+ }
+ }
+
// 6. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
// else let final be min(relativeEnd, len).
const final: Number =
@@ -169,12 +182,12 @@ ArrayPrototypeSlice(
// 7. Let count be max(final - k, 0).
const count: Number = Max(final - k, 0);
- assert(0 <= k);
- assert(k <= len);
- assert(0 <= final);
- assert(final <= len);
- assert(0 <= count);
- assert(count <= len);
+ dcheck(0 <= k);
+ dcheck(k <= len);
+ dcheck(0 <= final);
+ dcheck(final <= len);
+ dcheck(0 <= count);
+ dcheck(count <= len);
try {
return HandleFastSlice(context, o, k, count)
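Illustrative aside (not part of the patch): the comment block added to ArrayPrototypeSlice explains why the cloning fast path only fires when start is undefined or the literal Smi 0 and end is undefined: converting any other start/end value can run user code (e.g. a valueOf) that changes the array's length before CloneFastJSArray re-reads it. A toy, self-contained C++ model of that ordering hazard; none of these names are V8 APIs:

    #include <cstdio>
    #include <functional>
    #include <vector>

    using ToyArray = std::vector<int>;

    // Stand-in for ToInteger(start) where start is an object whose valueOf
    // may mutate the receiver as a side effect.
    int ToIntegerMaybeWithSideEffect(
        ToyArray& arr, const std::function<int(ToyArray&)>& value_of) {
      return value_of(arr);
    }

    int main() {
      ToyArray a = {1, 2, 3, 4};
      // A "start" whose conversion yields 0 but also shrinks the array.
      auto evil_start = [](ToyArray& arr) {
        arr.resize(1);
        return 0;
      };
      int start = ToIntegerMaybeWithSideEffect(a, evil_start);
      // A clone taken after the conversion observes length 1, not 4 -- hence
      // a fast clone is only safe when no user code can have run, i.e. when
      // start is undefined or already the Smi constant 0.
      std::printf("start=%d, length now=%zu\n", start, a.size());
      return 0;
    }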
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index a9b4b1235b..2ec4ab8f4e 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -16,20 +16,21 @@ type FastSmiOrObjectElements extends ElementsKind;
type FastDoubleElements extends ElementsKind;
type DictionaryElements extends ElementsKind;
-macro EnsureWriteableFastElements(implicit context: Context)(array: JSArray) {
- assert(IsFastElementsKind(array.map.elements_kind));
+macro EnsureWriteableFastElements(implicit context: Context)(array: JSArray):
+ void {
+ dcheck(IsFastElementsKind(array.map.elements_kind));
const elements: FixedArrayBase = array.elements;
if (elements.map != kCOWMap) return;
// There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always
// extract FixedArrays and don't have to worry about FixedDoubleArrays.
- assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
+ dcheck(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
const length = Convert<intptr>(Cast<Smi>(array.length) otherwise unreachable);
array.elements =
ExtractFixedArray(UnsafeCast<FixedArray>(elements), 0, length, length);
- assert(array.elements.map != kCOWMap);
+ dcheck(array.elements.map != kCOWMap);
}
macro LoadElementOrUndefined(implicit context: Context)(
@@ -51,7 +52,7 @@ macro StoreArrayHole(elements: FixedArray, k: Smi): void {
elements.objects[k] = TheHole;
}
-extern macro SetPropertyLength(implicit context: Context)(JSAny, Number);
+extern macro SetPropertyLength(implicit context: Context)(JSAny, Number): void;
const kLengthDescriptorIndex:
constexpr int31 generates 'JSArray::kLengthDescriptorIndex';
@@ -72,7 +73,7 @@ macro EnsureArrayLengthWritable(implicit context: Context)(map: Map):
const descriptors: DescriptorArray = map.instance_descriptors;
const descriptor:&DescriptorEntry =
&descriptors.descriptors[kLengthDescriptorIndex];
- assert(TaggedEqual(descriptor->key, LengthStringConstant()));
+ dcheck(TaggedEqual(descriptor->key, LengthStringConstant()));
const details: Smi = UnsafeCast<Smi>(descriptor->details);
if ((details & kAttributesReadOnlyMask) != 0) {
goto Bailout;
diff --git a/deps/v8/src/builtins/arraybuffer.tq b/deps/v8/src/builtins/arraybuffer.tq
index fc0152f51a..f033048abc 100644
--- a/deps/v8/src/builtins/arraybuffer.tq
+++ b/deps/v8/src/builtins/arraybuffer.tq
@@ -47,7 +47,7 @@ transitioning javascript builtin ArrayBufferPrototypeGetMaxByteLength(
// 6. Else,
// a. Let length be O.[[ArrayBufferByteLength]].
// 7. Return F(length);
- assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ dcheck(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
return Convert<Number>(o.max_byte_length);
}
@@ -92,7 +92,7 @@ SharedArrayBufferPrototypeGetMaxByteLength(
// 5. Else,
// a. Let length be O.[[ArrayBufferByteLength]].
// 6. Return F(length);
- assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
+ dcheck(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length);
return Convert<Number>(o.max_byte_length);
}
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index af1813b61d..7716d94288 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -158,7 +158,7 @@ struct float64_or_hole {
return this.value;
}
macro ValueUnsafeAssumeNotHole(): float64 {
- assert(!this.is_hole);
+ dcheck(!this.is_hole);
return this.value;
}
@@ -307,9 +307,18 @@ extern enum ElementsKind extends int32 {
UINT8_CLAMPED_ELEMENTS,
BIGUINT64_ELEMENTS,
BIGINT64_ELEMENTS,
+ RAB_GSAB_UINT8_ELEMENTS,
+ // TODO(torque): Allow duplicate enum values.
+ // FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ // FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
...
}
+const kFirstFixedTypedArrayElementsKind: constexpr ElementsKind =
+ ElementsKind::UINT8_ELEMENTS;
+const kFirstRabGsabFixedTypedArrayElementsKind: constexpr ElementsKind =
+ ElementsKind::RAB_GSAB_UINT8_ELEMENTS;
+
extern enum AllocationFlag extends int32
constexpr 'CodeStubAssembler::AllocationFlag' {
kNone,
@@ -554,7 +563,6 @@ extern class Filler extends HeapObject generates 'TNode<HeapObject>';
// Like JSObject, but created from API function.
@apiExposedInstanceTypeValue(0x422)
@doNotGenerateCast
-@noVerifier
extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
// TODO(gsathya): This only exists to make JSApiObject instance type into a
@@ -562,7 +570,6 @@ extern class JSApiObject extends JSObject generates 'TNode<JSObject>';
@apiExposedInstanceTypeValue(0x80A)
@doNotGenerateCast
@highestInstanceTypeWithinParentClassRange
-@noVerifier
extern class JSLastDummyApiObject extends JSApiObject
generates 'TNode<JSObject>';
@@ -578,11 +585,11 @@ extern macro Is64(): constexpr bool;
extern macro SelectBooleanConstant(bool): Boolean;
-extern macro Print(constexpr string);
-extern macro Print(constexpr string, Object);
-extern macro Comment(constexpr string);
-extern macro Print(Object);
-extern macro DebugBreak();
+extern macro Print(constexpr string): void;
+extern macro Print(constexpr string, Object): void;
+extern macro Comment(constexpr string): void;
+extern macro Print(Object): void;
+extern macro DebugBreak(): void;
// ES6 7.1.4 ToInteger ( argument )
transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny):
@@ -601,7 +608,7 @@ transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny):
// ToInteger normalizes -0 to +0.
if (value == 0.0) return SmiConstant(0);
const result = ChangeFloat64ToTagged(value);
- assert(IsNumberNormalized(result));
+ dcheck(IsNumberNormalized(result));
return result;
}
case (a: JSAnyNotNumber): {
@@ -741,9 +748,9 @@ transitioning macro ToPrimitiveDefault(implicit context: Context)(v: JSAny):
}
}
-extern transitioning runtime NormalizeElements(Context, JSObject);
+extern transitioning runtime NormalizeElements(Context, JSObject): void;
extern transitioning runtime TransitionElementsKindWithKind(
- Context, JSObject, Smi);
+ Context, JSObject, Smi): void;
extern macro LoadBufferObject(RawPtr, constexpr int32): Object;
extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr;
@@ -806,6 +813,8 @@ extern macro IsElementsKindLessThanOrEqual(
ElementsKind, constexpr ElementsKind): bool;
extern macro IsElementsKindGreaterThan(
ElementsKind, constexpr ElementsKind): bool;
+extern macro IsElementsKindGreaterThanOrEqual(
+ ElementsKind, constexpr ElementsKind): bool;
extern macro IsElementsKindInRange(
ElementsKind, constexpr ElementsKind, constexpr ElementsKind): bool;
@@ -1228,7 +1237,7 @@ extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
extern operator '.length' macro LoadFastJSArrayLength(FastJSArray): Smi;
operator '.length=' macro StoreFastJSArrayLength(
- array: FastJSArray, length: Smi) {
+ array: FastJSArray, length: Smi): void {
const array: JSArray = array;
array.length = length;
}
@@ -1252,7 +1261,7 @@ macro FastHoleyElementsKind(kind: ElementsKind): ElementsKind {
} else if (kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
return ElementsKind::HOLEY_DOUBLE_ELEMENTS;
}
- assert(kind == ElementsKind::PACKED_ELEMENTS);
+ dcheck(kind == ElementsKind::PACKED_ELEMENTS);
return ElementsKind::HOLEY_ELEMENTS;
}
@@ -1362,7 +1371,7 @@ macro NumberIsNaN(number: Number): bool {
}
}
-extern macro GotoIfForceSlowPath() labels Taken;
+extern macro GotoIfForceSlowPath(): void labels Taken;
macro IsForceSlowPath(): bool {
GotoIfForceSlowPath() otherwise return true;
return false;
@@ -1394,10 +1403,10 @@ macro SameValue(a: JSAny, b: JSAny): bool {
// Does "if (index1 + index2 > limit) goto IfOverflow" in an uintptr overflow
// friendly way where index1 and index2 are in [0, kMaxSafeInteger] range.
macro CheckIntegerIndexAdditionOverflow(
- index1: uintptr, index2: uintptr, limit: uintptr) labels IfOverflow {
+ index1: uintptr, index2: uintptr, limit: uintptr): void labels IfOverflow {
if constexpr (Is64()) {
- assert(index1 <= kMaxSafeIntegerUint64);
- assert(index2 <= kMaxSafeIntegerUint64);
+ dcheck(index1 <= kMaxSafeIntegerUint64);
+ dcheck(index2 <= kMaxSafeIntegerUint64);
// Given that both index1 and index2 are in a safe integer range the
// addition can't overflow.
if (index1 + index2 > limit) goto IfOverflow;
@@ -1431,7 +1440,7 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
if (kMode == kModeValueIsAnyNumber) {
if (valueSmi < 0) goto IfLessThanZero;
} else {
- assert(valueSmi >= 0);
+ dcheck(valueSmi >= 0);
}
const value: uintptr = Unsigned(Convert<intptr>(valueSmi));
// Positive Smi values definitely fit into both [0, kMaxSafeInteger] and
@@ -1439,14 +1448,14 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
return value;
}
case (valueHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(valueHeapNumber));
+ dcheck(IsNumberNormalized(valueHeapNumber));
const valueDouble: float64 = Convert<float64>(valueHeapNumber);
// NaNs must be handled outside.
- assert(!Float64IsNaN(valueDouble));
+ dcheck(!Float64IsNaN(valueDouble));
if (kMode == kModeValueIsAnyNumber) {
if (valueDouble < 0) goto IfLessThanZero;
} else {
- assert(valueDouble >= 0);
+ dcheck(valueDouble >= 0);
}
if constexpr (Is64()) {
@@ -1455,7 +1464,7 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
if (kMode == kModeValueIsAnyNumber) {
if (valueDouble > kMaxSafeInteger) goto IfSafeIntegerOverflow;
} else {
- assert(valueDouble <= kMaxSafeInteger);
+ dcheck(valueDouble <= kMaxSafeInteger);
}
} else {
// On 32-bit architectures uintptr range is smaller than safe integer
@@ -1464,7 +1473,7 @@ macro TryNumberToUintPtr(valueNumber: Number, kMode: constexpr int31):
kMode == kModeValueIsSafeInteger) {
if (valueDouble > kMaxUInt32Double) goto IfUIntPtrOverflow;
} else {
- assert(valueDouble <= kMaxUInt32Double);
+ dcheck(valueDouble <= kMaxUInt32Double);
}
}
return ChangeFloat64ToUintPtr(valueDouble);
@@ -1602,13 +1611,13 @@ macro ConvertToRelativeIndex(indexNumber: Number, length: uintptr): uintptr {
}
}
case (indexHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(indexHeapNumber));
+ dcheck(IsNumberNormalized(indexHeapNumber));
const indexDouble: float64 = Convert<float64>(indexHeapNumber);
// NaNs must already be handled by ConvertToRelativeIndex() version
// above accepting JSAny indices.
- assert(!Float64IsNaN(indexDouble));
+ dcheck(!Float64IsNaN(indexDouble));
const lengthDouble: float64 = Convert<float64>(length);
- assert(lengthDouble <= kMaxSafeInteger);
+ dcheck(lengthDouble <= kMaxSafeInteger);
if (indexDouble < 0) {
const relativeIndex: float64 = lengthDouble + indexDouble;
return relativeIndex > 0 ? ChangeFloat64ToUintPtr(relativeIndex) : 0;
@@ -1643,15 +1652,15 @@ macro ClampToIndexRange(indexNumber: Number, limit: uintptr): uintptr {
return index;
}
case (indexHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(indexHeapNumber));
+ dcheck(IsNumberNormalized(indexHeapNumber));
const indexDouble: float64 = Convert<float64>(indexHeapNumber);
// NaNs must already be handled by ClampToIndexRange() version
// above accepting JSAny indices.
- assert(!Float64IsNaN(indexDouble));
+ dcheck(!Float64IsNaN(indexDouble));
if (indexDouble <= 0) return 0;
const maxIndexDouble: float64 = Convert<float64>(limit);
- assert(maxIndexDouble <= kMaxSafeInteger);
+ dcheck(maxIndexDouble <= kMaxSafeInteger);
if (indexDouble >= maxIndexDouble) return limit;
return ChangeFloat64ToUintPtr(indexDouble);
@@ -1713,10 +1722,10 @@ macro IsFastJSArrayForReadWithNoCustomIteration(context: Context, o: Object):
}
extern transitioning runtime
-CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny);
+CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny): void;
extern transitioning runtime SetOwnPropertyIgnoreAttributes(
- implicit context: Context)(JSObject, String, JSAny, Smi);
+ implicit context: Context)(JSObject, String, JSAny, Smi): void;
namespace runtime {
extern runtime
@@ -1746,7 +1755,7 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
BuildAppendJSArray(ElementsKind::HOLEY_DOUBLE_ELEMENTS, array, value)
otherwise Slow;
} else {
- assert(IsFastSmiOrTaggedElementsKind(kind));
+ dcheck(IsFastSmiOrTaggedElementsKind(kind));
BuildAppendJSArray(ElementsKind::HOLEY_ELEMENTS, array, value)
otherwise Slow;
}
@@ -1767,7 +1776,7 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
otherwise unreachable;
doubleElements[index] = numberValue;
} else {
- assert(IsFastSmiOrTaggedElementsKind(kind));
+ dcheck(IsFastSmiOrTaggedElementsKind(kind));
const elements = Cast<FixedArray>(array.elements) otherwise unreachable;
elements[index] = value;
}
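Illustrative aside (not part of the patch): CheckIntegerIndexAdditionOverflow in one of the base.tq hunks above tests index1 + index2 > limit without risking uintptr wrap-around; on 64-bit targets indices bounded by 2^53 cannot overflow the add, while on 32-bit targets the comparison has to be rearranged. A standalone C++ sketch of the rearranged form (the helper name is illustrative, not a V8 symbol):

    #include <cstdint>
    #include <cstdio>

    // Overflow-safe "index1 + index2 > limit" for unsigned indices: instead
    // of adding (which may wrap), compare index2 against the remaining
    // headroom. index1 + index2 > limit
    //   <=>  index1 > limit || index2 > limit - index1
    bool IndexAdditionExceedsLimit(uintptr_t index1, uintptr_t index2,
                                   uintptr_t limit) {
      if (index1 > limit) return true;
      return index2 > limit - index1;
    }

    int main() {
      const uintptr_t limit = 100;
      std::printf("%d\n", IndexAdditionExceedsLimit(60, 50, limit));  // 1
      std::printf("%d\n", IndexAdditionExceedsLimit(60, 40, limit));  // 0
      // Near the top of the uintptr range a naive sum would wrap to a small
      // value and wrongly pass; the rearranged check does not.
      std::printf("%d\n",
                  IndexAdditionExceedsLimit(UINTPTR_MAX - 1, 2, limit));  // 1
      return 0;
    }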
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 75c3c194b9..e5a3d44686 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -36,7 +36,7 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
context(), method_name, original_array, len());
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
- CSA_ASSERT(this, UintPtrLessThanOrEqual(len(), LoadJSTypedArrayLength(a)));
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(len(), LoadJSTypedArrayLength(a)));
fast_typed_array_target_ =
Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a));
a_ = a;
@@ -184,6 +184,7 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor,
ForEachDirection direction, TNode<JSTypedArray> typed_array) {
+ // TODO(v8:11111): Support RAB / GSAB.
VariableList list({&a_, &k_}, zone());
TNode<UintPtrT> start = UintPtrConstant(0);
@@ -228,7 +229,7 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
+ CSA_DCHECK(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -248,7 +249,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast);
{
TNode<JSArray> array_receiver = CAST(receiver);
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(LoadJSArrayLength(array_receiver)));
TNode<IntPtrT> length =
LoadAndUntagObjectField(array_receiver, JSArray::kLengthOffset);
Label return_undefined(this), fast_elements(this);
@@ -330,7 +331,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
+ CSA_DCHECK(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
@@ -369,8 +370,8 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- TNode<Int32T> kind = LoadElementsKind(array_receiver);
- GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
+ TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
&default_label);
GotoIfNotNumber(arg, &object_push);
@@ -413,8 +414,8 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
- TNode<Int32T> kind = LoadElementsKind(array_receiver);
- GotoIf(Word32Equal(kind, Int32Constant(DICTIONARY_ELEMENTS)),
+ TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
&default_label);
Goto(&object_push);
}
@@ -449,7 +450,7 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
TNode<BInt> begin = SmiToBInt(Parameter<Smi>(Descriptor::kBegin));
TNode<BInt> count = SmiToBInt(Parameter<Smi>(Descriptor::kCount));
- CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
+ CSA_DCHECK(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
Return(ExtractFastJSArray(context, array, begin, count));
}
@@ -458,7 +459,7 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto array = Parameter<JSArray>(Descriptor::kSource);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
@@ -477,7 +478,7 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto array = Parameter<JSArray>(Descriptor::kSource);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
@@ -526,7 +527,7 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TNode<Number> length) {
TVARIABLE(Object, array);
Label is_constructor(this), is_not_constructor(this), done(this);
- CSA_ASSERT(this, IsNumberNormalized(length));
+ CSA_DCHECK(this, IsNumberNormalized(length));
GotoIf(TaggedIsSmi(receiver), &is_not_constructor);
Branch(IsConstructor(CAST(receiver)), &is_constructor, &is_not_constructor);
@@ -619,7 +620,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
TNode<JSArray> array = CAST(receiver);
// JSArray length is always a positive Smi for fast arrays.
- CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
TNode<Smi> array_length = LoadFastJSArrayLength(array);
TNode<IntPtrT> array_length_untagged = SmiUntag(array_length);
@@ -806,16 +807,16 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
BIND(&not_nan_loop);
{
- Label continue_loop(this), not_smi(this);
+ Label continue_loop(this), element_k_not_smi(this);
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
TNode<Object> element_k =
UnsafeLoadFixedArrayElement(elements, index_var.value());
- GotoIfNot(TaggedIsSmi(element_k), &not_smi);
+ GotoIfNot(TaggedIsSmi(element_k), &element_k_not_smi);
Branch(Float64Equal(search_num.value(), SmiToFloat64(CAST(element_k))),
&return_found, &continue_loop);
- BIND(&not_smi);
+ BIND(&element_k_not_smi);
GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
Branch(Float64Equal(search_num.value(),
LoadHeapNumberValue(CAST(element_k))),
@@ -1207,7 +1208,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// Let index be O.[[ArrayIteratorNextIndex]].
TNode<Number> index = LoadJSArrayIteratorNextIndex(iterator);
- CSA_ASSERT(this, IsNumberNonNegativeSafeInteger(index));
+ CSA_DCHECK(this, IsNumberNonNegativeSafeInteger(index));
// Dispatch based on the type of the {array}.
TNode<Map> array_map = LoadMap(array);
@@ -1219,7 +1220,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_array);
{
// If {array} is a JSArray, then the {index} must be in Unsigned32 range.
- CSA_ASSERT(this, IsNumberArrayIndex(index));
+ CSA_DCHECK(this, IsNumberArrayIndex(index));
// Check that the {index} is within range for the {array}. We handle all
// kinds of JSArray's here, so we do the computation on Uint32.
@@ -1260,8 +1261,8 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_other);
{
// We cannot enter here with either JSArray's or JSTypedArray's.
- CSA_ASSERT(this, Word32BinaryNot(IsJSArray(array)));
- CSA_ASSERT(this, Word32BinaryNot(IsJSTypedArray(array)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSArray(array)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSTypedArray(array)));
// Check that the {index} is within the bounds of the {array}s "length".
TNode<Number> length = CAST(
@@ -1297,7 +1298,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
//
// Note specifically that JSTypedArray's will never take this path, so
// we don't need to worry about their maximum value.
- CSA_ASSERT(this, Word32BinaryNot(IsJSTypedArray(array)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSTypedArray(array)));
TNode<Number> max_length =
SelectConstant(IsJSArray(array), NumberConstant(kMaxUInt32),
NumberConstant(kMaxSafeInteger));
@@ -1382,8 +1383,8 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
TNode<Number> start, TNode<Number> depth,
base::Optional<TNode<HeapObject>> mapper_function = base::nullopt,
base::Optional<TNode<Object>> this_arg = base::nullopt) {
- CSA_ASSERT(this, IsNumberPositive(source_length));
- CSA_ASSERT(this, IsNumberPositive(start));
+ CSA_DCHECK(this, IsNumberPositive(source_length));
+ CSA_DCHECK(this, IsNumberPositive(start));
// 1. Let targetIndex be start.
TVARIABLE(Number, var_target_index, start);
@@ -1404,7 +1405,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// a. Let P be ! ToString(sourceIndex).
// b. Let exists be ? HasProperty(source, P).
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
SmiGreaterThanOrEqual(CAST(source_index), SmiConstant(0)));
const TNode<Oddball> exists =
HasProperty(context, source, source_index, kHasProperty);
@@ -1419,7 +1420,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
// ii. If mapperFunction is present, then
if (mapper_function) {
- CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function.value()),
+ CSA_DCHECK(this, Word32Or(IsUndefined(mapper_function.value()),
IsCallable(mapper_function.value())));
DCHECK(this_arg.has_value());
@@ -1445,7 +1446,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
BIND(&if_flatten_array);
{
- CSA_ASSERT(this, IsJSArray(element));
+ CSA_DCHECK(this, IsJSArray(element));
// 1. Let elementLen be ? ToLength(? Get(element, "length")).
const TNode<Object> element_length =
@@ -1462,7 +1463,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
BIND(&if_flatten_proxy);
{
- CSA_ASSERT(this, IsJSProxy(element));
+ CSA_DCHECK(this, IsJSProxy(element));
// 1. Let elementLen be ? ToLength(? Get(element, "length")).
const TNode<Number> element_length = ToLength_Inline(
@@ -1802,11 +1803,11 @@ TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
Parameter<HeapObject>(Descriptor::kAllocationSite);
// Initial map for the builtin Array functions should be Map.
- CSA_ASSERT(this, IsMap(CAST(LoadObjectField(
+ CSA_DCHECK(this, IsMap(CAST(LoadObjectField(
target, JSFunction::kPrototypeOrInitialMapOffset))));
// We should either have undefined or a valid AllocationSite
- CSA_ASSERT(this, Word32Or(IsUndefined(maybe_allocation_site),
+ CSA_DCHECK(this, Word32Or(IsUndefined(maybe_allocation_site),
IsAllocationSite(maybe_allocation_site)));
// "Enter" the context of the Array function.
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 703be0198a..1baba71926 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -10,7 +10,7 @@
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
#include "src/execution/protectors-inl.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/objects/contexts.h"
#include "src/objects/elements-inl.h"
@@ -1338,8 +1338,8 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
if (length == 0) break;
FixedDoubleArray elements =
FixedDoubleArray::cast(array.elements());
- for (uint32_t i = 0; i < length; i++) {
- if (elements.is_the_hole(i)) {
+ for (uint32_t k = 0; k < length; k++) {
+ if (elements.is_the_hole(k)) {
// TODO(jkummerow/verwaest): We could be a bit more clever
// here: Check if there are no elements/getters on the
// prototype chain, and if so, allow creation of a holey
@@ -1348,7 +1348,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
failure = true;
break;
}
- double double_value = elements.get_scalar(i);
+ double double_value = elements.get_scalar(k);
double_storage->set(j, double_value);
j++;
}
@@ -1358,8 +1358,8 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
case PACKED_SMI_ELEMENTS: {
Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
FixedArray elements(FixedArray::cast(array.elements()));
- for (uint32_t i = 0; i < length; i++) {
- Object element = elements.get(i);
+ for (uint32_t k = 0; k < length; k++) {
+ Object element = elements.get(k);
if (element == the_hole) {
failure = true;
break;
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index f995299b7e..ed0110ba2c 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -191,8 +191,6 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [SAB] If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(is_shared, array_buffer, kMethodName);
- CHECK_RESIZABLE(false, array_buffer, kMethodName);
-
// * [AB] If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
if (!is_shared && array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -203,7 +201,7 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [AB] Let len be O.[[ArrayBufferByteLength]].
// * [SAB] Let len be O.[[ArrayBufferByteLength]].
- double const len = array_buffer->byte_length();
+ double const len = array_buffer->GetByteLength();
// * Let relativeStart be ? ToInteger(start).
Handle<Object> relative_start;
@@ -215,7 +213,6 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
double const first = (relative_start->Number() < 0)
? std::max(len + relative_start->Number(), 0.0)
: std::min(relative_start->Number(), len);
- Handle<Object> first_obj = isolate->factory()->NewNumber(first);
// * If end is undefined, let relativeEnd be len; else let relativeEnd be ?
// ToInteger(end).
@@ -279,6 +276,9 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
Handle<JSArrayBuffer> new_array_buffer = Handle<JSArrayBuffer>::cast(new_);
CHECK_SHARED(is_shared, new_array_buffer, kMethodName);
+ // The created ArrayBuffer might or might not be resizable, since the species
+ // constructor might return a non-resizable or a resizable buffer.
+
// * [AB] If IsDetachedBuffer(new) is true, throw a TypeError exception.
if (!is_shared && new_array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -302,7 +302,8 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
}
// * If new.[[ArrayBufferByteLength]] < newLen, throw a TypeError exception.
- if (new_array_buffer->byte_length() < new_len) {
+ size_t new_array_buffer_byte_length = new_array_buffer->GetByteLength();
+ if (new_array_buffer_byte_length < new_len) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(is_shared ? MessageTemplate::kSharedArrayBufferTooShort
@@ -321,21 +322,35 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * Let fromBuf be O.[[ArrayBufferData]].
// * Let toBuf be new.[[ArrayBufferData]].
// * Perform CopyDataBlockBytes(toBuf, 0, fromBuf, first, newLen).
- size_t first_size = 0, new_len_size = 0;
- CHECK(TryNumberToSize(*first_obj, &first_size));
- CHECK(TryNumberToSize(*new_len_obj, &new_len_size));
- DCHECK(new_array_buffer->byte_length() >= new_len_size);
+ size_t first_size = first;
+ size_t new_len_size = new_len;
+ DCHECK(new_array_buffer_byte_length >= new_len_size);
if (new_len_size != 0) {
- size_t from_byte_length = array_buffer->byte_length();
- USE(from_byte_length);
+ size_t from_byte_length = array_buffer->GetByteLength();
+ if (V8_UNLIKELY(!is_shared && array_buffer->is_resizable())) {
+ // The above steps might have resized the underlying buffer. In that case,
+ // only copy the still-accessible portion of the underlying data.
+ if (first_size > from_byte_length) {
+ return *new_; // Nothing to copy.
+ }
+ if (new_len_size > from_byte_length - first_size) {
+ new_len_size = from_byte_length - first_size;
+ }
+ }
DCHECK(first_size <= from_byte_length);
DCHECK(from_byte_length - first_size >= new_len_size);
uint8_t* from_data =
- reinterpret_cast<uint8_t*>(array_buffer->backing_store());
+ reinterpret_cast<uint8_t*>(array_buffer->backing_store()) + first_size;
uint8_t* to_data =
reinterpret_cast<uint8_t*>(new_array_buffer->backing_store());
- CopyBytes(to_data, from_data + first_size, new_len_size);
+ if (is_shared) {
+ base::Relaxed_Memcpy(reinterpret_cast<base::Atomic8*>(to_data),
+ reinterpret_cast<base::Atomic8*>(from_data),
+ new_len_size);
+ } else {
+ CopyBytes(to_data, from_data, new_len_size);
+ }
}
return *new_;
@@ -479,17 +494,7 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
array_buffer->GetBackingStore()->max_byte_length());
// 4. Let length be ArrayBufferByteLength(O, SeqCst).
- size_t byte_length;
- if (array_buffer->is_resizable()) {
- // Invariant: byte_length for GSAB is 0 (it needs to be read from the
- // BackingStore).
- DCHECK_EQ(0, array_buffer->byte_length());
-
- byte_length =
- array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
- } else {
- byte_length = array_buffer->byte_length();
- }
+ size_t byte_length = array_buffer->GetByteLength();
// 5. Return F(length).
return *isolate->factory()->NewNumberFromSize(byte_length);
}
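Illustrative aside (not part of the patch): the SliceHelper change above re-reads the source buffer's length via GetByteLength and, for resizable non-shared buffers, clamps the copy to whatever is still accessible, because the ToInteger and species-constructor steps may have shrunk the buffer in the meantime. A minimal C++ sketch of that clamping with plain byte vectors instead of JSArrayBuffer; the function name is illustrative only:

    #include <algorithm>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Copy new_len bytes starting at `first` from a source whose length may
    // have shrunk since the bounds were computed: copy only the portion that
    // is still accessible, or nothing at all.
    std::size_t CopyAccessiblePortion(const std::vector<unsigned char>& from,
                                      std::size_t first, std::size_t new_len,
                                      std::vector<unsigned char>& to) {
      std::size_t from_byte_length = from.size();  // re-read the current length
      if (first > from_byte_length) return 0;      // nothing left to copy
      new_len = std::min(new_len, from_byte_length - first);
      new_len = std::min(new_len, to.size());
      std::memcpy(to.data(), from.data() + first, new_len);
      return new_len;
    }

    int main() {
      // Bounds computed while the source was 16 bytes long...
      std::size_t first = 4, new_len = 8;
      // ...but it has since been resized down to 6 bytes.
      std::vector<unsigned char> from(6, 0xAB);
      std::vector<unsigned char> to(8, 0);
      std::size_t copied = CopyAccessiblePortion(from, first, new_len, to);
      std::printf("copied %zu of %zu requested bytes\n", copied, new_len);  // 2 of 8
      return 0;
    }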
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index c5b4eb9041..039f4ade69 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -55,7 +55,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
// unnecessary runtime checks removed.
// Ensure that the {async_function_object} is neither closed nor running.
- CSA_SLOW_ASSERT(
+ CSA_SLOW_DCHECK(
this, SmiGreaterThan(
LoadObjectField<Smi>(async_function_object,
JSGeneratorObject::kContinuationOffset),
@@ -97,7 +97,7 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
// Allocate and initialize the register file.
TNode<FixedArrayBase> parameters_and_registers =
AllocateFixedArray(HOLEY_ELEMENTS, parameters_and_register_length,
- kAllowLargeObjectAllocation);
+ AllocationFlag::kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), parameters_and_register_length,
RootIndex::kUndefinedValue);
@@ -226,7 +226,7 @@ TF_BUILTIN(AsyncFunctionLazyDeoptContinuation, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ CSA_DCHECK_JS_ARGC_EQ(this, 1);
const auto sentError = Parameter<Object>(Descriptor::kSentError);
const auto context = Parameter<Context>(Descriptor::kContext);
@@ -236,7 +236,7 @@ TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
}
TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
+ CSA_DCHECK_JS_ARGC_EQ(this, 1);
const auto sentValue = Parameter<Object>(Descriptor::kSentValue);
const auto context = Parameter<Context>(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 4d821c8279..0adb95ad43 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -55,12 +55,12 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
const TNode<JSFunction> promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
+ CSA_DCHECK(this, IsFunctionWithPrototypeSlotMap(LoadMap(promise_fun)));
const TNode<Map> promise_map = CAST(
LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset));
// Assert that the JSPromise map has an instance size is
// JSPromise::kSizeWithEmbedderFields.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrEqual(LoadMapInstanceSizeInWords(promise_map),
IntPtrConstant(JSPromise::kSizeWithEmbedderFields /
kTaggedSize)));
@@ -259,7 +259,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
// Ensure that we don't have to initialize prototype_or_initial_map field of
// JSFunction.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrEqual(LoadMapInstanceSizeInWords(function_map),
IntPtrConstant(JSFunction::kSizeWithoutPrototype /
kTaggedSize)));
@@ -302,7 +302,7 @@ TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure(
TNode<Context> AsyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
TNode<NativeContext> native_context, TNode<Oddball> done) {
- CSA_ASSERT(this, IsBoolean(done));
+ CSA_DCHECK(this, IsBoolean(done));
TNode<Context> context = AllocateSyntheticFunctionContext(
native_context, ValueUnwrapContext::kLength);
@@ -317,7 +317,7 @@ TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncBuiltinsAssembler) {
const TNode<Object> done =
LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
- CSA_ASSERT(this, IsBoolean(CAST(done)));
+ CSA_DCHECK(this, IsBoolean(CAST(done)));
const TNode<Object> unwrapped_value =
CallBuiltin(Builtin::kCreateIterResultObject, context, value, done);
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 9d15ba0cfd..87c1d443a6 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -65,18 +65,18 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
}
inline void SetGeneratorAwaiting(const TNode<JSGeneratorObject> generator) {
- CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ CSA_DCHECK(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
StoreObjectFieldNoWriteBarrier(
generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(1));
- CSA_ASSERT(this, IsGeneratorAwaiting(generator));
+ CSA_DCHECK(this, IsGeneratorAwaiting(generator));
}
inline void SetGeneratorNotAwaiting(
const TNode<JSGeneratorObject> generator) {
- CSA_ASSERT(this, IsGeneratorAwaiting(generator));
+ CSA_DCHECK(this, IsGeneratorAwaiting(generator));
StoreObjectFieldNoWriteBarrier(
generator, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
- CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ CSA_DCHECK(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
}
inline void CloseGenerator(const TNode<JSGeneratorObject> generator) {
@@ -216,7 +216,7 @@ void AsyncGeneratorBuiltinsAssembler::AsyncGeneratorAwaitResumeClosure(
SetGeneratorNotAwaiting(generator);
- CSA_SLOW_ASSERT(this, IsGeneratorSuspended(generator));
+ CSA_SLOW_DCHECK(this, IsGeneratorSuspended(generator));
// Remember the {resume_mode} for the {generator}.
StoreObjectFieldNoWriteBarrier(generator,
@@ -401,7 +401,7 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
Goto(&start);
BIND(&start);
- CSA_ASSERT(this, IsGeneratorNotExecuting(generator));
+ CSA_DCHECK(this, IsGeneratorNotExecuting(generator));
// Stop resuming if suspended for Await.
ReturnIf(IsGeneratorAwaiting(generator), UndefinedConstant());
@@ -478,7 +478,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
const auto done = Parameter<Object>(Descriptor::kDone);
const auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
+ CSA_DCHECK(this, Word32BinaryNot(IsGeneratorAwaiting(generator)));
// This operation should be called only when the `value` parameter has been
// Await-ed. Typically, this means `value` is not a JSPromise value. However,
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index f4af61b1a0..cbae195060 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -161,7 +161,7 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
const TNode<JSFunction> promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
- CSA_ASSERT(this, IsConstructor(promise_fun));
+ CSA_DCHECK(this, IsConstructor(promise_fun));
// Let valueWrapper be PromiseResolve(%Promise%, « value »).
// IfAbruptRejectPromise(valueWrapper, promiseCapability).
@@ -228,16 +228,16 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
// Let nextDone be IteratorComplete(nextResult).
// IfAbruptRejectPromise(nextDone, promiseCapability).
- const TNode<Object> done =
+ const TNode<Object> iter_result_done =
GetProperty(context, iter_result, factory()->done_string());
// Let nextValue be IteratorValue(nextResult).
// IfAbruptRejectPromise(nextValue, promiseCapability).
- const TNode<Object> value =
+ const TNode<Object> iter_result_value =
GetProperty(context, iter_result, factory()->value_string());
- var_value = value;
- var_done = done;
+ var_value = iter_result_value;
+ var_done = iter_result_done;
Goto(&merge);
}
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 30da5207f9..2cb74aa399 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -125,21 +125,21 @@ Object BigIntToStringImpl(Handle<Object> receiver, Handle<Object> radix,
BUILTIN(BigIntPrototypeToLocaleString) {
HandleScope scope(isolate);
- const char* method = "BigInt.prototype.toLocaleString";
+ const char* method_name = "BigInt.prototype.toLocaleString";
#ifdef V8_INTL_SUPPORT
// 1. Let x be ? thisBigIntValue(this value).
Handle<BigInt> x;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, x, ThisBigIntValue(isolate, args.receiver(), method));
+ isolate, x, ThisBigIntValue(isolate, args.receiver(), method_name));
RETURN_RESULT_OR_FAILURE(
isolate,
Intl::NumberToLocaleString(isolate, x, args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2), method));
+ args.atOrUndefined(isolate, 2), method_name));
// Fallbacks to old toString implemention if no V8_INTL_SUPPORT
#endif // V8_INTL_SUPPORT
Handle<Object> radix = isolate->factory()->undefined_value();
- return BigIntToStringImpl(args.receiver(), radix, isolate, method);
+ return BigIntToStringImpl(args.receiver(), radix, isolate, method_name);
}
BUILTIN(BigIntPrototypeToString) {
diff --git a/deps/v8/src/builtins/builtins-bigint.tq b/deps/v8/src/builtins/builtins-bigint.tq
index 067fb235de..3cf46ef9bf 100644
--- a/deps/v8/src/builtins/builtins-bigint.tq
+++ b/deps/v8/src/builtins/builtins-bigint.tq
@@ -70,9 +70,9 @@ macro MutableBigIntAbsoluteSub(implicit context: Context)(
const ylength = ReadBigIntLength(y);
const xsign = ReadBigIntSign(x);
- assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
+ dcheck(MutableBigIntAbsoluteCompare(x, y) >= 0);
if (xlength == 0) {
- assert(ylength == 0);
+ dcheck(ylength == 0);
return x;
}
@@ -104,7 +104,7 @@ macro MutableBigIntAbsoluteAdd(implicit context: Context)(
// case: 0n + 0n
if (xlength == 0) {
- assert(ylength == 0);
+ dcheck(ylength == 0);
return x;
}
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 78003e71bd..8b7b364375 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -280,7 +280,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
TNode<Int32T> length = var_length.value();
{
Label normalize_done(this);
- CSA_ASSERT(this, Int32LessThanOrEqual(
+ CSA_DCHECK(this, Int32LessThanOrEqual(
length, Int32Constant(FixedArray::kMaxLength)));
GotoIfNot(Word32Equal(length, Int32Constant(0)), &normalize_done);
// Make sure we don't accidentally pass along the
@@ -327,14 +327,14 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
TNode<Int32T> args_count, TNode<Context> context, TNode<Int32T> kind) {
const ElementsKind new_kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
- CSA_ASSERT(this, Int32LessThanOrEqual(length,
+ CSA_DCHECK(this, Int32LessThanOrEqual(length,
Int32Constant(FixedArray::kMaxLength)));
TNode<IntPtrT> intptr_length = ChangeInt32ToIntPtr(length);
- CSA_ASSERT(this, WordNotEqual(intptr_length, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(intptr_length, IntPtrConstant(0)));
// Allocate a new FixedArray of Objects.
TNode<FixedArray> new_elements = CAST(AllocateFixedArray(
- new_kind, intptr_length, CodeStubAssembler::kAllowLargeObjectAllocation));
+ new_kind, intptr_length, AllocationFlag::kAllowLargeObjectAllocation));
// CopyFixedArrayElements does not distinguish between holey and packed for
// its first argument, so we don't need to dispatch on {kind} here.
CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
@@ -439,7 +439,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
TNode<Int32T> length = LoadAndUntagToWord32ObjectField(
var_js_array.value(), JSArray::kLengthOffset);
TNode<FixedArrayBase> elements = var_elements.value();
- CSA_ASSERT(this, Int32LessThanOrEqual(
+ CSA_DCHECK(this, Int32LessThanOrEqual(
length, Int32Constant(FixedArray::kMaxLength)));
if (!new_target) {
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index b44c70423e..f4885efed8 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -151,7 +151,7 @@ void BaseCollectionsAssembler::AddConstructorEntry(
Label* if_may_have_side_effects, Label* if_exception,
TVariable<Object>* var_exception) {
compiler::ScopedExceptionHandler handler(this, if_exception, var_exception);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(key_value)));
if (variant == kMap || variant == kWeakMap) {
TorqueStructKeyValuePair pair =
if_may_have_side_effects != nullptr
@@ -191,7 +191,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
TNode<JSArray> initial_entries_jsarray =
UncheckedCast<JSArray>(initial_entries);
#if DEBUG
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(
+ CSA_DCHECK(this, IsFastJSArrayWithNoCustomIteration(
context, initial_entries_jsarray));
TNode<Map> original_initial_entries_map = LoadMap(initial_entries_jsarray);
#endif
@@ -215,7 +215,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
Unreachable();
BIND(&if_not_modified);
}
- CSA_ASSERT(this, TaggedEqual(original_initial_entries_map,
+ CSA_DCHECK(this, TaggedEqual(original_initial_entries_map,
LoadMap(initial_entries_jsarray)));
#endif
use_fast_loop = Int32FalseConstant();
@@ -238,13 +238,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
TNode<FixedArrayBase> elements = LoadElements(fast_jsarray);
TNode<Int32T> elements_kind = LoadElementsKind(fast_jsarray);
TNode<JSFunction> add_func = GetInitialAddFunction(variant, native_context);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
TaggedEqual(GetAddFunction(variant, native_context, collection),
add_func));
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(context, fast_jsarray));
+ CSA_DCHECK(this, IsFastJSArrayWithNoCustomIteration(context, fast_jsarray));
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
- CSA_ASSERT(
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
+ CSA_DCHECK(
this, HasInitialCollectionPrototype(variant, native_context, collection));
#if DEBUG
@@ -277,7 +277,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
// A Map constructor requires entries to be arrays (ex. [key, value]),
// so a FixedDoubleArray can never succeed.
if (variant == kMap || variant == kWeakMap) {
- CSA_ASSERT(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
TNode<Object> element =
LoadAndNormalizeFixedDoubleArrayElement(elements, IntPtrConstant(0));
ThrowTypeError(context, MessageTemplate::kIteratorValueNotAnObject,
@@ -296,9 +296,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
}
BIND(&exit);
#if DEBUG
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
TaggedEqual(original_collection_map, LoadMap(CAST(collection))));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
TaggedEqual(original_fast_js_array_map, LoadMap(fast_jsarray)));
#endif
}
@@ -307,14 +307,14 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
Variant variant, TNode<Context> context, TNode<Context> native_context,
TNode<Object> collection, TNode<Object> iterable) {
Label exit(this), loop(this), if_exception(this, Label::kDeferred);
- CSA_ASSERT(this, Word32BinaryNot(IsNullOrUndefined(iterable)));
+ CSA_DCHECK(this, Word32BinaryNot(IsNullOrUndefined(iterable)));
TNode<Object> add_func = GetAddFunction(variant, context, collection);
IteratorBuiltinsAssembler iterator_assembler(this->state());
TorqueStructIteratorRecord iterator =
iterator_assembler.GetIterator(context, iterable);
- CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
+ CSA_DCHECK(this, Word32BinaryNot(IsUndefined(iterator.object)));
TNode<Map> fast_iterator_result_map = CAST(
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
@@ -402,7 +402,7 @@ TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollection(
TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollectionFast(
TNode<JSFunction> constructor) {
- CSA_ASSERT(this, IsConstructorMap(LoadMap(constructor)));
+ CSA_DCHECK(this, IsConstructorMap(LoadMap(constructor)));
TNode<Map> initial_map =
CAST(LoadJSFunctionPrototypeOrInitialMap(constructor));
return AllocateJSObjectFromMap(initial_map);
@@ -779,7 +779,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
not_found);
// Make sure the entry index is within range.
- CSA_ASSERT(
+ CSA_DCHECK(
this,
UintPtrLessThan(
var_entry.value(),
@@ -1081,7 +1081,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
TNode<IntPtrT> index;
std::tie(table, index) =
TransitionAndUpdate<JSMapIterator, OrderedHashMap>(iterator);
- CSA_ASSERT(this, IntPtrEqual(index, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrEqual(index, IntPtrConstant(0)));
TNode<IntPtrT> size =
LoadAndUntagObjectField(table, OrderedHashMap::NumberOfElementsOffset());
@@ -1089,8 +1089,9 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
const ElementsKind kind = PACKED_ELEMENTS;
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- TNode<JSArray> array = AllocateJSArray(kind, array_map, size, SmiTag(size),
- kAllowLargeObjectAllocation);
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size),
+ AllocationFlag::kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -1128,7 +1129,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::MapIteratorToList(
BIND(&write_value);
{
- CSA_ASSERT(this, InstanceTypeEqual(LoadInstanceType(iterator),
+ CSA_DCHECK(this, InstanceTypeEqual(LoadInstanceType(iterator),
JS_MAP_VALUE_ITERATOR_TYPE));
TNode<Object> entry_value =
UnsafeLoadFixedArrayElement(table, entry_start_position,
@@ -1187,7 +1188,7 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
TNode<IntPtrT> iter_index;
std::tie(iter_table, iter_index) =
TransitionAndUpdate<JSSetIterator, OrderedHashSet>(CAST(iterable));
- CSA_ASSERT(this, IntPtrEqual(iter_index, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrEqual(iter_index, IntPtrConstant(0)));
var_table = iter_table;
Goto(&copy);
}
@@ -1200,8 +1201,9 @@ TNode<JSArray> CollectionsBuiltinsAssembler::SetOrSetIteratorToList(
const ElementsKind kind = PACKED_ELEMENTS;
TNode<Map> array_map =
LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- TNode<JSArray> array = AllocateJSArray(kind, array_map, size, SmiTag(size),
- kAllowLargeObjectAllocation);
+ TNode<JSArray> array =
+ AllocateJSArray(kind, array_map, size, SmiTag(size),
+ AllocationFlag::kAllowLargeObjectAllocation);
TNode<FixedArray> elements = CAST(LoadElements(array));
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -1272,7 +1274,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForSmiKey(
const TNode<IntPtrT> key_untagged = SmiUntag(smi_key);
const TNode<IntPtrT> hash =
ChangeInt32ToIntPtr(ComputeUnseededHash(key_untagged));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1287,7 +1289,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForStringKey(
TNode<CollectionType> table, TNode<String> key_tagged,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = ComputeStringHash(key_tagged);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1302,7 +1304,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForHeapNumberKey(
TNode<CollectionType> table, TNode<HeapNumber> key_heap_number,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = CallGetHashRaw(key_heap_number);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
const TNode<Float64T> key_float = LoadHeapNumberValue(key_heap_number);
FindOrderedHashTableEntry<CollectionType>(
@@ -1318,7 +1320,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForBigIntKey(
TNode<CollectionType> table, TNode<BigInt> key_big_int,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = CallGetHashRaw(key_big_int);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1333,7 +1335,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntryForOtherKey(
TNode<CollectionType> table, TNode<HeapObject> key_heap_object,
TVariable<IntPtrT>* result, Label* entry_found, Label* not_found) {
const TNode<IntPtrT> hash = GetHash(key_heap_object);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(hash, IntPtrConstant(0)));
*result = hash;
FindOrderedHashTableEntry<CollectionType>(
table, hash,
@@ -1481,17 +1483,17 @@ CollectionsBuiltinsAssembler::Transition(
Goto(&loop);
BIND(&loop);
{
- TNode<TableType> table = var_table.value();
- TNode<IntPtrT> index = var_index.value();
+ TNode<TableType> current_table = var_table.value();
+ TNode<IntPtrT> current_index = var_index.value();
TNode<Object> next_table =
- LoadObjectField(table, TableType::NextTableOffset());
+ LoadObjectField(current_table, TableType::NextTableOffset());
GotoIf(TaggedIsSmi(next_table), &done_loop);
var_table = CAST(next_table);
- var_index = SmiUntag(
- CAST(CallBuiltin(Builtin::kOrderedHashTableHealIndex,
- NoContextConstant(), table, SmiTag(index))));
+ var_index = SmiUntag(CAST(CallBuiltin(Builtin::kOrderedHashTableHealIndex,
+ NoContextConstant(), current_table,
+ SmiTag(current_index))));
Goto(&loop);
}
BIND(&done_loop);
@@ -2496,14 +2498,14 @@ void WeakCollectionsBuiltinsAssembler::AddEntry(
TNode<HeapObject> WeakCollectionsBuiltinsAssembler::AllocateTable(
Variant variant, TNode<IntPtrT> at_least_space_for) {
// See HashTable::New().
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(IntPtrConstant(0), at_least_space_for));
TNode<IntPtrT> capacity = HashTableComputeCapacity(at_least_space_for);
// See HashTable::NewInternal().
TNode<IntPtrT> length = KeyIndexFromEntry(capacity);
- TNode<FixedArray> table = CAST(
- AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation));
+ TNode<FixedArray> table = CAST(AllocateFixedArray(
+ HOLEY_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation));
TNode<Map> map =
HeapConstant(EphemeronHashTable::GetMap(ReadOnlyRoots(isolate())));
@@ -2814,7 +2816,7 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
auto key = Parameter<JSReceiver>(Descriptor::kKey);
auto value = Parameter<Object>(Descriptor::kValue);
- CSA_ASSERT(this, IsJSReceiver(key));
+ CSA_DCHECK(this, IsJSReceiver(key));
Label call_runtime(this), if_no_hash(this), if_not_found(this);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 23d7747491..28af8bfabc 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -189,7 +189,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
GotoIf(IsOneClosureCellMap(feedback_cell_map), &one_closure);
- CSA_ASSERT(this, IsManyClosuresCellMap(feedback_cell_map),
+ CSA_DCHECK(this, IsManyClosuresCellMap(feedback_cell_map),
feedback_cell_map, feedback_cell);
Goto(&cell_done);
@@ -211,7 +211,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
const TNode<IntPtrT> function_map_index = Signed(IntPtrAdd(
DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(flags),
IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX)));
- CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(
function_map_index,
IntPtrConstant(Context::LAST_FUNCTION_MAP_INDEX)));
@@ -338,7 +338,7 @@ TNode<JSObject> ConstructorBuiltinsAssembler::FastNewObject(
BIND(&instantiate_map);
return AllocateJSObjectFromMap(initial_map, properties.value(), base::nullopt,
- kNone, kWithSlackTracking);
+ AllocationFlag::kNone, kWithSlackTracking);
}
TNode<Context> ConstructorBuiltinsAssembler::FastNewFunctionContext(
@@ -539,7 +539,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site);
TNode<JSObject> boilerplate = LoadBoilerplate(allocation_site);
TNode<Map> boilerplate_map = LoadMap(boilerplate);
- CSA_ASSERT(this, IsJSObjectMap(boilerplate_map));
+ CSA_DCHECK(this, IsJSObjectMap(boilerplate_map));
TVARIABLE(HeapObject, var_properties);
{
@@ -587,7 +587,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
Goto(&done);
BIND(&if_copy_elements);
- CSA_ASSERT(this, Word32BinaryNot(
+ CSA_DCHECK(this, Word32BinaryNot(
IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
auto flags = ExtractFixedArrayFlag::kAllFixedArrays;
var_elements = CloneFixedArray(boilerplate_elements, flags);
@@ -681,7 +681,7 @@ TNode<JSObject> ConstructorBuiltinsAssembler::CreateEmptyObjectLiteral(
TNode<Map> map = LoadObjectFunctionInitialMap(native_context);
// Ensure that slack tracking is disabled for the map.
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
+ CSA_DCHECK(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
LoadMapBitField3(map)));
TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
TNode<JSObject> result =
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 465de8e982..7bd277beaf 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -19,6 +19,7 @@ namespace internal {
// ES #sec-dataview-constructor
BUILTIN(DataViewConstructor) {
+ const char* const kMethodName = "DataView constructor";
HandleScope scope(isolate);
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -33,29 +34,31 @@ BUILTIN(DataViewConstructor) {
Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
- // 2. If Type(buffer) is not Object, throw a TypeError exception.
- // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
- // TypeError exception.
+ // 2. Perform ? RequireInternalSlot(buffer, [[ArrayBufferData]]).
if (!buffer->IsJSArrayBuffer()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
}
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
- // 4. Let offset be ? ToIndex(byteOffset).
+ // 3. Let offset be ? ToIndex(byteOffset).
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, byte_offset,
Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
size_t view_byte_offset = byte_offset->Number();
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- // We currently violate the specification at this point. TODO: Fix that.
+ // 4. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (array_buffer->was_detached()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
- // 6. Let bufferByteLength be the value of buffer's
- // [[ArrayBufferByteLength]] internal slot.
+ // 5. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
size_t const buffer_byte_length = array_buffer->byte_length();
- // 7. If offset > bufferByteLength, throw a RangeError exception.
+ // 6. If offset > bufferByteLength, throw a RangeError exception.
if (view_byte_offset > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset));
@@ -63,11 +66,11 @@ BUILTIN(DataViewConstructor) {
size_t view_byte_length;
if (byte_length->IsUndefined(isolate)) {
- // 8. If byteLength is either not present or undefined, then
+ // 7. If byteLength is undefined, then
// a. Let viewByteLength be bufferByteLength - offset.
view_byte_length = buffer_byte_length - view_byte_offset;
} else {
- // 9. Else,
+ // 8. Else,
// a. Let viewByteLength be ? ToIndex(byteLength).
// b. If offset+viewByteLength > bufferByteLength, throw a
// RangeError exception.
@@ -82,30 +85,45 @@ BUILTIN(DataViewConstructor) {
view_byte_length = byte_length->Number();
}
- // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
- // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
- // [[ByteLength]], [[ByteOffset]]»).
+ // 9. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+ // [[ByteLength]], [[ByteOffset]]»).
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
+ Handle<JSDataView> data_view = Handle<JSDataView>::cast(result);
for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
// TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
- Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::zero());
+ data_view->SetEmbedderField(i, Smi::zero());
}
- // 11. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
- Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+ // We have to set the internal slots before the detached check in step 10,
+ // or the TorqueGeneratedClassVerifier ends up complaining that the slot is
+ // empty or invalid on heap teardown.
+ // The result object is not observable from JavaScript when step 10 aborts
+ // early, so it is fine to set the internal slots here.
+
+ // 11. Set O.[[ViewedArrayBuffer]] to buffer.
+ data_view->set_buffer(*array_buffer);
- // 12. Set O's [[ByteLength]] internal slot to viewByteLength.
- Handle<JSDataView>::cast(result)->set_byte_length(view_byte_length);
+ // 12. Set O.[[ByteLength]] to viewByteLength.
+ data_view->set_byte_length(view_byte_length);
- // 13. Set O's [[ByteOffset]] internal slot to offset.
- Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
- Handle<JSDataView>::cast(result)->set_data_pointer(
+ // 13. Set O.[[ByteOffset]] to offset.
+ data_view->set_byte_offset(view_byte_offset);
+ data_view->set_data_pointer(
isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
+ // 10. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ if (array_buffer->was_detached()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
// 14. Return O.
return *result;
}
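
An illustrative sketch (not part of the patch): the detachment checks added above are observable from JavaScript, because ToIndex can run user code that detaches the buffer mid-construction. Assuming an environment where user code can detach an ArrayBuffer (ArrayBuffer.prototype.transfer is used below as a stand-in for any detaching operation), the constructor now throws the spec-mandated TypeError:

```ts
// Hypothetical TypeScript illustration of the new checks, not V8 code.
const buf = new ArrayBuffer(16);

const trickyOffset = {
  // ToIndex(byteOffset) calls valueOf(), which detaches the buffer here.
  valueOf(): number {
    (buf as any).transfer?.(); // assumes some way to detach from user code
    return 0;
  },
};

try {
  new DataView(buf, trickyOffset as any);
} catch (e) {
  // Step 4 (and the late step-10 re-check) now raise a TypeError.
  console.log(e instanceof TypeError); // true
}
```
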
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 32c1f4b059..cb264279d5 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -315,31 +315,33 @@ BUILTIN(DatePrototypeSetFullYear) {
Handle<Object> year = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
Object::ToNumber(isolate, year));
- double y = year->Number(), m = 0.0, dt = 1.0;
+ double year_double = year->Number(), month_double = 0.0, day_double = 1.0;
int time_within_day = 0;
if (!std::isnan(date->value().Number())) {
int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
+ int year_int, month_int, day_int;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year_int, &month_int,
+ &day_int);
+ month_double = month_int;
+ day_double = day_int;
}
if (argc >= 2) {
Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- m = month->Number();
+ month_double = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
- Object::ToNumber(isolate, date));
- dt = date->Number();
+ Handle<Object> day = args.at(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, day,
+ Object::ToNumber(isolate, day));
+ day_double = day->Number();
}
}
- double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ double time_val =
+ MakeDate(MakeDay(year_double, month_double, day_double), time_within_day);
return SetLocalDateValue(isolate, date, time_val);
}
@@ -534,30 +536,32 @@ BUILTIN(DatePrototypeSetUTCFullYear) {
Handle<Object> year = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
Object::ToNumber(isolate, year));
- double y = year->Number(), m = 0.0, dt = 1.0;
+ double year_double = year->Number(), month_double = 0.0, day_double = 1.0;
int time_within_day = 0;
if (!std::isnan(date->value().Number())) {
int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int const days = isolate->date_cache()->DaysFromTime(time_ms);
time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
+ int year_int, month_int, day_int;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year_int, &month_int,
+ &day_int);
+ month_double = month_int;
+ day_double = day_int;
}
if (argc >= 2) {
Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- m = month->Number();
+ month_double = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date,
- Object::ToNumber(isolate, date));
- dt = date->Number();
+ Handle<Object> day = args.at(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, day,
+ Object::ToNumber(isolate, day));
+ day_double = day->Number();
}
}
- double const time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ double const time_val =
+ MakeDate(MakeDay(year_double, month_double, day_double), time_within_day);
return *JSDate::SetValue(date, DateCache::TimeClip(time_val));
}
@@ -775,8 +779,8 @@ BUILTIN(DatePrototypeToLocaleDateString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleDateString);
- const char* method = "Date.prototype.toLocaleDateString";
- CHECK_RECEIVER(JSDate, date, method);
+ const char* method_name = "Date.prototype.toLocaleDateString";
+ CHECK_RECEIVER(JSDate, date, method_name);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
@@ -786,7 +790,7 @@ BUILTIN(DatePrototypeToLocaleDateString) {
args.atOrUndefined(isolate, 2), // options
JSDateTimeFormat::RequiredOption::kDate, // required
JSDateTimeFormat::DefaultsOption::kDate, // defaults
- method)); // method
+ method_name)); // method_name
}
// ecma402 #sup-date.prototype.tolocalestring
@@ -795,8 +799,8 @@ BUILTIN(DatePrototypeToLocaleString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleString);
- const char* method = "Date.prototype.toLocaleString";
- CHECK_RECEIVER(JSDate, date, method);
+ const char* method_name = "Date.prototype.toLocaleString";
+ CHECK_RECEIVER(JSDate, date, method_name);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
@@ -806,7 +810,7 @@ BUILTIN(DatePrototypeToLocaleString) {
args.atOrUndefined(isolate, 2), // options
JSDateTimeFormat::RequiredOption::kAny, // required
JSDateTimeFormat::DefaultsOption::kAll, // defaults
- method)); // method
+ method_name)); // method_name
}
// ecma402 #sup-date.prototype.tolocaletimestring
@@ -815,8 +819,8 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateToLocaleTimeString);
- const char* method = "Date.prototype.toLocaleTimeString";
- CHECK_RECEIVER(JSDate, date, method);
+ const char* method_name = "Date.prototype.toLocaleTimeString";
+ CHECK_RECEIVER(JSDate, date, method_name);
RETURN_RESULT_OR_FAILURE(
isolate, JSDateTimeFormat::ToLocaleDateTime(
@@ -826,7 +830,7 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
args.atOrUndefined(isolate, 2), // options
JSDateTimeFormat::RequiredOption::kTime, // required
JSDateTimeFormat::DefaultsOption::kTime, // defaults
- method)); // method
+ method_name)); // method_name
}
#endif // V8_INTL_SUPPORT
@@ -872,11 +876,11 @@ BUILTIN(DatePrototypeSetYear) {
Handle<Object> year = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year,
Object::ToNumber(isolate, year));
- double m = 0.0, dt = 1.0, y = year->Number();
- if (!std::isnan(y)) {
- double y_int = DoubleToInteger(y);
- if (0.0 <= y_int && y_int <= 99.0) {
- y = 1900.0 + y_int;
+ double month_double = 0.0, day_double = 1.0, year_double = year->Number();
+ if (!std::isnan(year_double)) {
+ double year_int = DoubleToInteger(year_double);
+ if (0.0 <= year_int && year_int <= 99.0) {
+ year_double = 1900.0 + year_int;
}
}
int time_within_day = 0;
@@ -885,12 +889,14 @@ BUILTIN(DatePrototypeSetYear) {
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
+ int year_int, month_int, day_int;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year_int, &month_int,
+ &day_int);
+ month_double = month_int;
+ day_double = day_int;
}
- double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ double time_val =
+ MakeDate(MakeDay(year_double, month_double, day_double), time_within_day);
return SetLocalDateValue(isolate, date, time_val);
}
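
For reference, the renames above (y/m/dt to year_double/month_double/day_double, and the shadowing date argument to day) are behavior-preserving; the semantics these builtins implement are the standard ones, sketched here in TypeScript:

```ts
// setFullYear keeps the current local month and day when they are omitted.
const d = new Date(2021, 6, 15); // July 15, 2021, local time
d.setFullYear(1999);
console.log(d.getFullYear(), d.getMonth(), d.getDate()); // 1999 6 15

// The legacy Annex B setYear maps two-digit years into 1900..1999.
const legacy = new Date(2021, 0, 1);
(legacy as any).setYear(99); // setYear is untyped in the standard TS lib
console.log(legacy.getFullYear()); // 1999
```
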
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index c63db39b04..f7b94c4059 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -316,7 +316,7 @@ namespace internal {
\
/* Abort */ \
TFC(Abort, Abort) \
- TFC(AbortCSAAssert, Abort) \
+ TFC(AbortCSADcheck, Abort) \
\
/* Built-in functions for Javascript */ \
/* Special internal builtins */ \
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index ff39350725..cdae0cdd33 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -74,7 +74,7 @@ void GeneratorBuiltinsAssembler::InnerResume(
// The generator function should not close the generator by itself, so let's
// check that it is indeed not closed yet.
- CSA_ASSERT(this, SmiNotEqual(result_continuation, closed));
+ CSA_DCHECK(this, SmiNotEqual(result_continuation, closed));
TNode<Smi> executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
GotoIf(SmiEqual(result_continuation, executing), &if_final_return);
@@ -94,21 +94,21 @@ void GeneratorBuiltinsAssembler::InnerResume(
BIND(&if_receiverisclosed);
{
// The {receiver} is closed already.
- TNode<Object> result;
+ TNode<Object> builtin_result;
switch (resume_mode) {
case JSGeneratorObject::kNext:
- result = CallBuiltin(Builtin::kCreateIterResultObject, context,
- UndefinedConstant(), TrueConstant());
+ builtin_result = CallBuiltin(Builtin::kCreateIterResultObject, context,
+ UndefinedConstant(), TrueConstant());
break;
case JSGeneratorObject::kReturn:
- result = CallBuiltin(Builtin::kCreateIterResultObject, context, value,
- TrueConstant());
+ builtin_result = CallBuiltin(Builtin::kCreateIterResultObject, context,
+ value, TrueConstant());
break;
case JSGeneratorObject::kThrow:
- result = CallRuntime(Runtime::kThrow, context, value);
+ builtin_result = CallRuntime(Runtime::kThrow, context, value);
break;
}
- args->PopAndReturn(result);
+ args->PopAndReturn(builtin_result);
}
BIND(&if_receiverisrunning);
@@ -219,7 +219,7 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
@@ -273,7 +273,7 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) {
auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
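
For reference, the result/builtin_result rename in InnerResume above only avoids shadowing; the closed-generator paths it covers (kNext, kReturn, kThrow) behave as in this short TypeScript sketch of standard generator semantics:

```ts
function* gen(): Generator<number, number | undefined> {
  yield 1;
}

const it = gen();
it.return(undefined); // closes the generator

console.log(it.next());     // { value: undefined, done: true }  (kNext path)
console.log(it.return(42)); // { value: 42, done: true }         (kReturn path)
try {
  it.throw(new Error("boom")); // kThrow path: rethrows on a closed generator
} catch (e) {
  console.log((e as Error).message); // "boom"
}
```
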
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 03f9fb932a..dc5a49640e 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -323,12 +323,13 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
&next);
- TNode<IntPtrT> object = BitcastTaggedToWord(
- UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
- Branch(
- IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
- &next, &call_incremental_wb);
-
+ {
+ TNode<IntPtrT> object = BitcastTaggedToWord(
+ UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+ Branch(
+ IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
+ &next, &call_incremental_wb);
+ }
BIND(&call_incremental_wb);
{
TNode<ExternalReference> function = ExternalConstant(
@@ -842,7 +843,7 @@ TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
auto source = Parameter<Object>(Descriptor::kSource);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, TaggedNotEqual(target, source));
+ CSA_DCHECK(this, TaggedNotEqual(target, source));
Label if_runtime(this, Label::kDeferred);
Return(SetOrCopyDataProperties(context, target, source, &if_runtime, false));
@@ -1049,9 +1050,9 @@ TF_BUILTIN(Abort, CodeStubAssembler) {
TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}
-TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
+TF_BUILTIN(AbortCSADcheck, CodeStubAssembler) {
auto message = Parameter<String>(Descriptor::kMessageOrMessageId);
- TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message);
+ TailCallRuntime(Runtime::kAbortCSADcheck, NoContextConstant(), message);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
@@ -1233,7 +1234,7 @@ TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
GotoIf(TaggedEqual(on_non_existent,
SmiConstant(OnNonExistent::kThrowReferenceError)),
&throw_reference_error);
- CSA_ASSERT(this, TaggedEqual(on_non_existent,
+ CSA_DCHECK(this, TaggedEqual(on_non_existent,
SmiConstant(OnNonExistent::kReturnUndefined)));
Return(UndefinedConstant());
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 6a9e0fbad4..dd0410ccd2 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -29,7 +29,7 @@ class IntlBuiltinsAssembler : public CodeStubAssembler {
TNode<JSArray> AllocateEmptyJSArray(TNode<Context> context);
TNode<IntPtrT> PointerToSeqStringData(TNode<String> seq_string) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
STATIC_ASSERT(SeqOneByteString::kHeaderSize ==
SeqTwoByteString::kHeaderSize);
@@ -55,7 +55,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
to_direct.TryToDirect(&runtime);
const TNode<Int32T> instance_type = to_direct.instance_type();
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(IsIndirectStringInstanceType(instance_type)));
GotoIfNot(IsOneByteStringInstanceType(instance_type), &runtime);
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index cff87636cb..6fd36dd8e0 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -30,6 +30,7 @@
#include "src/objects/js-segmenter-inl.h"
#include "src/objects/js-segments-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "src/objects/property-descriptor.h"
#include "src/objects/smi.h"
#include "unicode/brkiter.h"
@@ -78,9 +79,9 @@ BUILTIN(NumberFormatSupportedLocalesOf) {
}
BUILTIN(NumberFormatPrototypeFormatToParts) {
- const char* const method = "Intl.NumberFormat.prototype.formatToParts";
+ const char* const method_name = "Intl.NumberFormat.prototype.formatToParts";
HandleScope handle_scope(isolate);
- CHECK_RECEIVER(JSNumberFormat, number_format, method);
+ CHECK_RECEIVER(JSNumberFormat, number_format, method_name);
Handle<Object> x;
if (args.length() >= 2) {
@@ -95,9 +96,10 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
}
BUILTIN(DateTimeFormatPrototypeResolvedOptions) {
- const char* const method = "Intl.DateTimeFormat.prototype.resolvedOptions";
+ const char* const method_name =
+ "Intl.DateTimeFormat.prototype.resolvedOptions";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSReceiver, format_holder, method);
+ CHECK_RECEIVER(JSReceiver, format_holder, method_name);
// 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
Handle<JSDateTimeFormat> date_time_format;
@@ -121,15 +123,15 @@ BUILTIN(DateTimeFormatSupportedLocalesOf) {
}
BUILTIN(DateTimeFormatPrototypeFormatToParts) {
- const char* const method = "Intl.DateTimeFormat.prototype.formatToParts";
+ const char* const method_name = "Intl.DateTimeFormat.prototype.formatToParts";
HandleScope handle_scope(isolate);
- CHECK_RECEIVER(JSObject, date_format_holder, method);
+ CHECK_RECEIVER(JSObject, date_format_holder, method_name);
Factory* factory = isolate->factory();
if (!date_format_holder->IsJSDateTimeFormat()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- factory->NewStringFromAsciiChecked(method),
+ factory->NewStringFromAsciiChecked(method_name),
date_format_holder));
}
Handle<JSDateTimeFormat> dtf =
@@ -156,12 +158,12 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
// Common code for DateTimeFormatPrototypeFormatRange(|ToParts)
template <class T>
V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
- BuiltinArguments args, Isolate* isolate, const char* const method,
+ BuiltinArguments args, Isolate* isolate, const char* const method_name,
MaybeHandle<T> (*format)(Isolate*, Handle<JSDateTimeFormat>, double,
double)) {
// 1. Let dtf be this value.
// 2. If Type(dtf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSObject, date_format_holder, method);
+ CHECK_RECEIVER(JSObject, date_format_holder, method_name);
Factory* factory = isolate->factory();
@@ -170,7 +172,7 @@ V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
if (!date_format_holder->IsJSDateTimeFormat()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- factory->NewStringFromAsciiChecked(method),
+ factory->NewStringFromAsciiChecked(method_name),
date_format_holder));
}
Handle<JSDateTimeFormat> dtf =
@@ -206,16 +208,17 @@ V8_WARN_UNUSED_RESULT Object DateTimeFormatRange(
}
BUILTIN(DateTimeFormatPrototypeFormatRange) {
- const char* const method = "Intl.DateTimeFormat.prototype.formatRange";
+ const char* const method_name = "Intl.DateTimeFormat.prototype.formatRange";
HandleScope handle_scope(isolate);
- return DateTimeFormatRange<String>(args, isolate, method,
+ return DateTimeFormatRange<String>(args, isolate, method_name,
JSDateTimeFormat::FormatRange);
}
BUILTIN(DateTimeFormatPrototypeFormatRangeToParts) {
- const char* const method = "Intl.DateTimeFormat.prototype.formatRangeToParts";
+ const char* const method_name =
+ "Intl.DateTimeFormat.prototype.formatRangeToParts";
HandleScope handle_scope(isolate);
- return DateTimeFormatRange<JSArray>(args, isolate, method,
+ return DateTimeFormatRange<JSArray>(args, isolate, method_name,
JSDateTimeFormat::FormatRangeToParts);
}
@@ -251,7 +254,8 @@ Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
template <class T>
Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
v8::Isolate::UseCounterFeature feature,
- Handle<Object> constructor, const char* method) {
+ Handle<Object> constructor,
+ const char* method_name) {
isolate->CountUsage(feature);
Handle<JSReceiver> new_target;
// 1. If NewTarget is undefined, let newTarget be the active
@@ -276,7 +280,7 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
// 3. Perform ? Initialize<T>(Format, locales, options).
Handle<T> format;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, format, T::New(isolate, map, locales, options, method));
+ isolate, format, T::New(isolate, map, locales, options, method_name));
// 4. Let this be the this value.
if (args.new_target()->IsUndefined(isolate)) {
Handle<Object> receiver = args.receiver();
@@ -290,10 +294,10 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
if (ordinary_has_instance_obj->BooleanValue(isolate)) {
if (!receiver->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->NewStringFromAsciiChecked(method),
- receiver));
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name),
+ receiver));
}
Handle<JSReceiver> rec = Handle<JSReceiver>::cast(receiver);
// a. Perform ? DefinePropertyOrThrow(this,
@@ -324,15 +328,15 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
template <class T>
Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
v8::Isolate::UseCounterFeature feature,
- const char* method) {
+ const char* method_name) {
isolate->CountUsage(feature);
// 1. If NewTarget is undefined, throw a TypeError exception.
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked(method)));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
}
// [[Construct]]
Handle<JSFunction> target = args.target();
@@ -356,7 +360,7 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
*/
template <class T>
Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate,
- const char* method) {
+ const char* method_name) {
Handle<JSReceiver> new_target;
if (args.new_target()->IsUndefined(isolate)) {
@@ -376,7 +380,7 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate,
isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
RETURN_RESULT_OR_FAILURE(isolate,
- T::New(isolate, map, locales, options, method));
+ T::New(isolate, map, locales, options, method_name));
}
} // namespace
@@ -430,11 +434,11 @@ BUILTIN(NumberFormatConstructor) {
BUILTIN(NumberFormatPrototypeResolvedOptions) {
HandleScope scope(isolate);
- const char* const method = "Intl.NumberFormat.prototype.resolvedOptions";
+ const char* const method_name = "Intl.NumberFormat.prototype.resolvedOptions";
// 1. Let nf be the this value.
// 2. If Type(nf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSReceiver, number_format_holder, method);
+ CHECK_RECEIVER(JSReceiver, number_format_holder, method_name);
// 3. Let nf be ? UnwrapNumberFormat(nf)
Handle<JSNumberFormat> number_format;
@@ -446,12 +450,12 @@ BUILTIN(NumberFormatPrototypeResolvedOptions) {
}
BUILTIN(NumberFormatPrototypeFormatNumber) {
- const char* const method = "get Intl.NumberFormat.prototype.format";
+ const char* const method_name = "get Intl.NumberFormat.prototype.format";
HandleScope scope(isolate);
// 1. Let nf be the this value.
// 2. If Type(nf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSReceiver, receiver, method);
+ CHECK_RECEIVER(JSReceiver, receiver, method_name);
// 3. Let nf be ? UnwrapNumberFormat(nf).
Handle<JSNumberFormat> number_format;
@@ -518,12 +522,12 @@ BUILTIN(DateTimeFormatConstructor) {
}
BUILTIN(DateTimeFormatPrototypeFormat) {
- const char* const method = "get Intl.DateTimeFormat.prototype.format";
+ const char* const method_name = "get Intl.DateTimeFormat.prototype.format";
HandleScope scope(isolate);
// 1. Let dtf be this value.
// 2. If Type(dtf) is not Object, throw a TypeError exception.
- CHECK_RECEIVER(JSReceiver, receiver, method);
+ CHECK_RECEIVER(JSReceiver, receiver, method_name);
// 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
Handle<JSDateTimeFormat> format;
@@ -615,12 +619,12 @@ BUILTIN(LocaleConstructor) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kLocale);
- const char* method = "Intl.Locale";
+ const char* method_name = "Intl.Locale";
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked(method)));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
}
// [[Construct]]
Handle<JSFunction> target = args.target();
@@ -657,7 +661,7 @@ BUILTIN(LocaleConstructor) {
Handle<JSReceiver> options_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, options_object,
- Intl::CoerceOptionsToObject(isolate, options, method));
+ CoerceOptionsToObject(isolate, options, method_name));
RETURN_RESULT_OR_FAILURE(
isolate, JSLocale::New(isolate, map, locale_string, options_object));
@@ -953,14 +957,14 @@ BUILTIN(CollatorSupportedLocalesOf) {
}
BUILTIN(CollatorPrototypeCompare) {
- const char* const method = "get Intl.Collator.prototype.compare";
+ const char* const method_name = "get Intl.Collator.prototype.compare";
HandleScope scope(isolate);
// 1. Let collator be this value.
// 2. If Type(collator) is not Object, throw a TypeError exception.
// 3. If collator does not have an [[InitializedCollator]] internal slot,
// throw a TypeError exception.
- CHECK_RECEIVER(JSCollator, collator, method);
+ CHECK_RECEIVER(JSCollator, collator, method_name);
// 4. If collator.[[BoundCompare]] is undefined, then
Handle<Object> bound_compare(collator->bound_compare(), isolate);
@@ -1009,14 +1013,15 @@ BUILTIN(CollatorInternalCompare) {
// 7. Return CompareStrings(collator, X, Y).
icu::Collator* icu_collator = collator->icu_collator().raw();
CHECK_NOT_NULL(icu_collator);
- return *Intl::CompareStrings(isolate, *icu_collator, string_x, string_y);
+ return Smi::FromInt(
+ Intl::CompareStrings(isolate, *icu_collator, string_x, string_y));
}
// ecma402 #sec-%segmentiteratorprototype%.next
BUILTIN(SegmentIteratorPrototypeNext) {
- const char* const method = "%SegmentIterator.prototype%.next";
+ const char* const method_name = "%SegmentIterator.prototype%.next";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method);
+ CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method_name);
RETURN_RESULT_OR_FAILURE(isolate,
JSSegmentIterator::Next(isolate, segment_iterator));
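
A side note on the CollatorInternalCompare change above: the comparison result is now returned as a Smi built directly from the integer that Intl::CompareStrings produces, rather than via a handle. The value seen from JavaScript is unchanged, a plain number whose sign encodes the ordering:

```ts
const collator = new Intl.Collator("en");
// compare() returns a negative, zero, or positive number (in practice -1/0/1).
console.log(collator.compare("a", "b") < 0);   // true
console.log(collator.compare("a", "a") === 0); // true
console.log(collator.compare("b", "a") > 0);   // true
```
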
@@ -1068,9 +1073,9 @@ BUILTIN(SegmenterPrototypeSegment) {
// ecma402 #sec-%segmentsprototype%.containing
BUILTIN(SegmentsPrototypeContaining) {
- const char* const method = "%Segments.prototype%.containing";
+ const char* const method_name = "%Segments.prototype%.containing";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSSegments, segments, method);
+ CHECK_RECEIVER(JSSegments, segments, method_name);
Handle<Object> index = args.atOrUndefined(isolate, 1);
// 6. Let n be ? ToInteger(index).
@@ -1084,9 +1089,9 @@ BUILTIN(SegmentsPrototypeContaining) {
// ecma402 #sec-%segmentsprototype%-@@iterator
BUILTIN(SegmentsPrototypeIterator) {
- const char* const method = "%SegmentIsPrototype%[@@iterator]";
+ const char* const method_name = "%SegmentIsPrototype%[@@iterator]";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSSegments, segments, method);
+ CHECK_RECEIVER(JSSegments, segments, method_name);
RETURN_RESULT_OR_FAILURE(
isolate,
JSSegmentIterator::Create(isolate, segments->icu_break_iterator().raw(),
@@ -1108,10 +1113,11 @@ BUILTIN(V8BreakIteratorPrototypeResolvedOptions) {
}
BUILTIN(V8BreakIteratorPrototypeAdoptText) {
- const char* const method = "get Intl.v8BreakIterator.prototype.adoptText";
+ const char* const method_name =
+ "get Intl.v8BreakIterator.prototype.adoptText";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_adopt_text(break_iterator->bound_adopt_text(), isolate);
if (!bound_adopt_text->IsUndefined(isolate)) {
@@ -1144,10 +1150,10 @@ BUILTIN(V8BreakIteratorInternalAdoptText) {
}
BUILTIN(V8BreakIteratorPrototypeFirst) {
- const char* const method = "get Intl.v8BreakIterator.prototype.first";
+ const char* const method_name = "get Intl.v8BreakIterator.prototype.first";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_first(break_iterator->bound_first(), isolate);
if (!bound_first->IsUndefined(isolate)) {
@@ -1174,10 +1180,10 @@ BUILTIN(V8BreakIteratorInternalFirst) {
}
BUILTIN(V8BreakIteratorPrototypeNext) {
- const char* const method = "get Intl.v8BreakIterator.prototype.next";
+ const char* const method_name = "get Intl.v8BreakIterator.prototype.next";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_next(break_iterator->bound_next(), isolate);
if (!bound_next->IsUndefined(isolate)) {
@@ -1203,10 +1209,10 @@ BUILTIN(V8BreakIteratorInternalNext) {
}
BUILTIN(V8BreakIteratorPrototypeCurrent) {
- const char* const method = "get Intl.v8BreakIterator.prototype.current";
+ const char* const method_name = "get Intl.v8BreakIterator.prototype.current";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_current(break_iterator->bound_current(), isolate);
if (!bound_current->IsUndefined(isolate)) {
@@ -1232,10 +1238,11 @@ BUILTIN(V8BreakIteratorInternalCurrent) {
}
BUILTIN(V8BreakIteratorPrototypeBreakType) {
- const char* const method = "get Intl.v8BreakIterator.prototype.breakType";
+ const char* const method_name =
+ "get Intl.v8BreakIterator.prototype.breakType";
HandleScope scope(isolate);
- CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method);
+ CHECK_RECEIVER(JSV8BreakIterator, break_iterator, method_name);
Handle<Object> bound_break_type(break_iterator->bound_break_type(), isolate);
if (!bound_break_type->IsUndefined(isolate)) {
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 4fb5de7eb5..2ef9aa0734 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -136,7 +136,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
&maybe_use_sfi_code);
// If it isn't undefined or a fixed array, it must be a feedback vector.
- CSA_ASSERT(this, IsFeedbackVector(feedback_cell_value));
+ CSA_DCHECK(this, IsFeedbackVector(feedback_cell_value));
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
@@ -146,7 +146,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode.
BIND(&maybe_use_sfi_code);
- CSA_ASSERT(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
+ CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
isolate(), CompileLazy))));
StoreObjectField(function, JSFunction::kCodeOffset, ToCodeT(sfi_code));
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 64dd15bd1e..6c677e922d 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -55,7 +55,7 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
TNode<Context> native_context) {
- CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_DCHECK(this, IsNativeContext(native_context));
return LoadExternalPointerFromObject(native_context,
NativeContext::kMicrotaskQueueOffset,
kNativeContextMicrotaskQueueTag);
@@ -105,7 +105,7 @@ TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::CalculateRingBufferOffset(
void MicrotaskQueueBuiltinsAssembler::PrepareForContext(
TNode<Context> native_context, Label* bailout) {
- CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_DCHECK(this, IsNativeContext(native_context));
// Skip the microtask execution if the associated context has been shut down.
GotoIf(WordEqual(GetMicrotaskQueue(native_context), IntPtrConstant(0)),
@@ -117,7 +117,7 @@ void MicrotaskQueueBuiltinsAssembler::PrepareForContext(
void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
TNode<Context> current_context, TNode<Microtask> microtask) {
- CSA_ASSERT(this, TaggedIsNotSmi(microtask));
+ CSA_DCHECK(this, TaggedIsNotSmi(microtask));
StoreRoot(RootIndex::kCurrentMicrotask, microtask);
TNode<IntPtrT> saved_entered_context_count = GetEnteredContextCount();
@@ -378,7 +378,7 @@ TNode<IntPtrT> MicrotaskQueueBuiltinsAssembler::GetEnteredContextCount() {
void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
TNode<Context> native_context) {
- CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_DCHECK(this, IsNativeContext(native_context));
auto ref = ExternalReference::handle_scope_implementer_address(isolate());
TNode<RawPtrT> hsi = Load<RawPtrT>(ExternalConstant(ref));
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index f6ff61a704..4eddf5358a 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -111,7 +111,7 @@ BUILTIN(NumberPrototypeToFixed) {
// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
BUILTIN(NumberPrototypeToLocaleString) {
HandleScope scope(isolate);
- const char* method = "Number.prototype.toLocaleString";
+ const char* method_name = "Number.prototype.toLocaleString";
isolate->CountUsage(v8::Isolate::UseCounterFeature::kNumberToLocaleString);
@@ -126,7 +126,7 @@ BUILTIN(NumberPrototypeToLocaleString) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kNotGeneric,
- isolate->factory()->NewStringFromAsciiChecked(method),
+ isolate->factory()->NewStringFromAsciiChecked(method_name),
isolate->factory()->Number_string()));
}
@@ -134,7 +134,7 @@ BUILTIN(NumberPrototypeToLocaleString) {
RETURN_RESULT_OR_FAILURE(
isolate,
Intl::NumberToLocaleString(isolate, value, args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2), method));
+ args.atOrUndefined(isolate, 2), method_name));
#else
// Turn the {value} into a String.
return *isolate->factory()->NumberToString(value);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 558b582789..3e56df803a 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -249,8 +249,9 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
BIND(&if_has_enum_cache);
{
GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
- TNode<FixedArray> values_or_entries = CAST(AllocateFixedArray(
- PACKED_ELEMENTS, object_enum_length, kAllowLargeObjectAllocation));
+ TNode<FixedArray> values_or_entries =
+ CAST(AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ AllocationFlag::kAllowLargeObjectAllocation));
// If we have an enum cache, we can't detect accessors on the object
// until we loop through its descriptors.
@@ -278,7 +279,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
{
// Currently, we will not invoke getters,
// so the map will not change.
- CSA_ASSERT(this, TaggedEqual(map, LoadMap(object)));
+ CSA_DCHECK(this, TaggedEqual(map, LoadMap(object)));
TNode<IntPtrT> descriptor_entry = var_descriptor_number.value();
TNode<Name> next_key =
LoadKeyByDescriptorEntry(descriptors, descriptor_entry);
@@ -293,7 +294,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
// If property is accessor, we escape fast path and call runtime.
GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path);
- CSA_ASSERT(this, IsPropertyKindData(kind));
+ CSA_DCHECK(this, IsPropertyKindData(kind));
// If desc is not undefined and desc.[[Enumerable]] is true, then skip to
// the next descriptor.
@@ -346,7 +347,7 @@ TNode<JSArray>
ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray(
TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size,
TNode<Map> array_map, Label* if_empty) {
- CSA_ASSERT(this, IsJSArrayMap(array_map));
+ CSA_DCHECK(this, IsJSArrayMap(array_map));
GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty);
TNode<JSArray> array = AllocateJSArray(array_map, result, SmiTag(size));
@@ -477,7 +478,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
&if_slow);
// Ensure that the {object} doesn't have any elements.
- CSA_ASSERT(this, IsJSObjectMap(object_map));
+ CSA_DCHECK(this, IsJSObjectMap(object_map));
TNode<FixedArrayBase> object_elements = LoadElements(CAST(object));
GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
@@ -853,7 +854,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
BIND(&if_object);
{
- CSA_ASSERT(this, IsJSReceiver(CAST(receiver)));
+ CSA_DCHECK(this, IsJSReceiver(CAST(receiver)));
var_default = ObjectToStringConstant();
Goto(&checkstringtag);
}
@@ -868,7 +869,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
GotoIf(IsHeapNumberMap(receiver_map), &if_number);
GotoIf(IsSymbolMap(receiver_map), &if_symbol);
GotoIf(IsUndefined(receiver), &return_undefined);
- CSA_ASSERT(this, IsNull(receiver));
+ CSA_DCHECK(this, IsNull(receiver));
Return(NullToStringConstant());
BIND(&return_undefined);
@@ -980,7 +981,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
LoadMapInstanceType(receiver_value_map);
GotoIf(IsBigIntInstanceType(receiver_value_instance_type),
&if_value_is_bigint);
- CSA_ASSERT(this, IsStringInstanceType(receiver_value_instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(receiver_value_instance_type));
Goto(&if_value_is_string);
BIND(&if_value_is_number);
@@ -1096,7 +1097,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&no_properties);
{
TVARIABLE(Map, map);
- TVARIABLE(HeapObject, properties);
+ TVARIABLE(HeapObject, new_properties);
Label null_proto(this), non_null_proto(this), instantiate_map(this);
Branch(IsNull(prototype), &null_proto, &non_null_proto);
@@ -1105,17 +1106,18 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
{
map = LoadSlowObjectWithNullPrototypeMap(native_context);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- properties =
+ new_properties =
AllocateSwissNameDictionary(SwissNameDictionary::kInitialCapacity);
} else {
- properties = AllocateNameDictionary(NameDictionary::kInitialCapacity);
+ new_properties =
+ AllocateNameDictionary(NameDictionary::kInitialCapacity);
}
Goto(&instantiate_map);
}
BIND(&non_null_proto);
{
- properties = EmptyFixedArrayConstant();
+ new_properties = EmptyFixedArrayConstant();
map = LoadObjectFunctionInitialMap(native_context);
GotoIf(TaggedEqual(prototype, LoadMapPrototype(map.value())),
&instantiate_map);
@@ -1133,7 +1135,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
BIND(&instantiate_map);
{
TNode<JSObject> instance =
- AllocateJSObjectFromMap(map.value(), properties.value());
+ AllocateJSObjectFromMap(map.value(), new_properties.value());
args.PopAndReturn(instance);
}
}
@@ -1251,13 +1253,14 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TNode<IntPtrT> size =
IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count);
- TNode<FixedArrayBase> parameters_and_registers =
- AllocateFixedArray(HOLEY_ELEMENTS, size, kAllowLargeObjectAllocation);
+ TNode<FixedArrayBase> parameters_and_registers = AllocateFixedArray(
+ HOLEY_ELEMENTS, size, AllocationFlag::kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), size, RootIndex::kUndefinedValue);
// TODO(cbruni): support start_offset to avoid double initialization.
- TNode<JSObject> result = AllocateJSObjectFromMap(
- map, base::nullopt, base::nullopt, kNone, kWithSlackTracking);
+ TNode<JSObject> result =
+ AllocateJSObjectFromMap(map, base::nullopt, base::nullopt,
+ AllocationFlag::kNone, kWithSlackTracking);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kFunctionOffset,
closure);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContextOffset,
@@ -1294,7 +1297,7 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kJSActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
+ CSA_DCHECK(this, IsUndefined(Parameter<Object>(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, argc);
TNode<Object> object_input = args.GetOptionalArgumentValue(0);
@@ -1496,7 +1499,7 @@ TNode<JSObject> ObjectBuiltinsAssembler::FromPropertyDescriptor(
Goto(&return_desc);
BIND(&bailout);
- CSA_ASSERT(this, Int32Constant(0));
+ CSA_DCHECK(this, Int32Constant(0));
Unreachable();
}
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 9442b64d06..29eec7c9f5 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -91,7 +91,7 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
auto proxy = Parameter<JSProxy>(Descriptor::kFunction);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsCallable(proxy));
+ CSA_DCHECK(this, IsCallable(proxy));
PerformStackCheck(context);
@@ -103,11 +103,11 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
CAST(LoadObjectField(proxy, JSProxy::kHandlerOffset));
// 2. If handler is null, throw a TypeError exception.
- CSA_ASSERT(this, IsNullOrJSReceiver(handler));
+ CSA_DCHECK(this, IsNullOrJSReceiver(handler));
GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
// 3. Assert: Type(handler) is Object.
- CSA_ASSERT(this, IsJSReceiver(handler));
+ CSA_DCHECK(this, IsJSReceiver(handler));
// 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
TNode<Object> target = LoadObjectField(proxy, JSProxy::kTargetOffset);
@@ -147,7 +147,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsCallable(proxy));
+ CSA_DCHECK(this, IsCallable(proxy));
Label throw_proxy_handler_revoked(this, Label::kDeferred),
trap_undefined(this), not_an_object(this, Label::kDeferred);
@@ -157,11 +157,11 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
CAST(LoadObjectField(proxy, JSProxy::kHandlerOffset));
// 2. If handler is null, throw a TypeError exception.
- CSA_ASSERT(this, IsNullOrJSReceiver(handler));
+ CSA_DCHECK(this, IsNullOrJSReceiver(handler));
GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
// 3. Assert: Type(handler) is Object.
- CSA_ASSERT(this, IsJSReceiver(handler));
+ CSA_DCHECK(this, IsJSReceiver(handler));
// 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
TNode<Object> target = LoadObjectField(proxy, JSProxy::kTargetOffset);
@@ -198,7 +198,7 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
BIND(&trap_undefined);
{
// 6.a. Assert: target has a [[Construct]] internal method.
- CSA_ASSERT(this, IsConstructor(CAST(target)));
+ CSA_DCHECK(this, IsConstructor(CAST(target)));
// 6.b. Return ? Construct(target, argumentsList, newTarget).
TailCallStub(CodeFactory::Construct(isolate()), context, target, new_target,
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 6e4307b404..0a75e1bebd 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -89,9 +89,9 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<JSRegExp> regexp, TNode<Number> last_index,
TNode<BoolT> has_indices, TNode<FixedArray>* elements_out) {
- CSA_ASSERT(this, SmiLessThanOrEqual(
+ CSA_DCHECK(this, SmiLessThanOrEqual(
length, SmiConstant(JSArray::kMaxFastArrayLength)));
- CSA_ASSERT(this, SmiGreaterThan(length, SmiConstant(0)));
+ CSA_DCHECK(this, SmiGreaterThan(length, SmiConstant(0)));
// Allocate.
@@ -112,7 +112,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
std::tie(var_array, var_elements) =
AllocateUninitializedJSArrayWithElements(
elements_kind, map, length, no_gc_site, length_intptr,
- kAllowLargeObjectAllocation, JSRegExpResult::kSize);
+ AllocationFlag::kAllowLargeObjectAllocation, JSRegExpResult::kSize);
Goto(&allocated);
}
@@ -124,7 +124,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
std::tie(var_array, var_elements) =
AllocateUninitializedJSArrayWithElements(
elements_kind, map, length, no_gc_site, length_intptr,
- kAllowLargeObjectAllocation, JSRegExpResultWithIndices::kSize);
+ AllocationFlag::kAllowLargeObjectAllocation,
+ JSRegExpResultWithIndices::kSize);
Goto(&allocated);
}
@@ -260,19 +261,19 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
{
TNode<IntPtrT> from_cursor = var_from_cursor.value();
TNode<IntPtrT> to_cursor = var_to_cursor.value();
- TNode<Smi> start =
+ TNode<Smi> start_cursor =
CAST(UnsafeLoadFixedArrayElement(match_info, from_cursor));
Label next_iter(this);
- GotoIf(SmiEqual(start, SmiConstant(-1)), &next_iter);
+ GotoIf(SmiEqual(start_cursor, SmiConstant(-1)), &next_iter);
TNode<IntPtrT> from_cursor_plus1 =
IntPtrAdd(from_cursor, IntPtrConstant(1));
- TNode<Smi> end =
+ TNode<Smi> end_cursor =
CAST(UnsafeLoadFixedArrayElement(match_info, from_cursor_plus1));
- TNode<String> capture =
- CAST(CallBuiltin(Builtin::kSubString, context, string, start, end));
+ TNode<String> capture = CAST(CallBuiltin(Builtin::kSubString, context,
+ string, start_cursor, end_cursor));
UnsafeStoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
@@ -285,7 +286,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
BIND(&named_captures);
{
- CSA_ASSERT(this, SmiGreaterThan(num_results, SmiConstant(1)));
+ CSA_DCHECK(this, SmiGreaterThan(num_results, SmiConstant(1)));
// Preparations for named capture properties. Exit early if the result does
// not have any named captures to minimize performance impact.
@@ -295,7 +296,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
// We reach this point only if captures exist, implying that the assigned
// regexp engine must be able to handle captures.
- CSA_ASSERT(
+ CSA_DCHECK(
this,
Word32Or(
SmiEqual(CAST(LoadFixedArrayElement(data, JSRegExp::kTagIndex)),
@@ -313,7 +314,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<FixedArray> names = CAST(maybe_names);
TNode<IntPtrT> names_length = LoadAndUntagFixedArrayBaseLength(names);
- CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero()));
+ CSA_DCHECK(this, IntPtrGreaterThan(names_length, IntPtrZero()));
// Stash names in case we need them to build the indices array later.
StoreObjectField(result, JSRegExpResult::kNamesOffset, names);
@@ -329,8 +330,8 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
properties = AllocateSwissNameDictionary(num_properties);
} else {
- properties =
- AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation);
+ properties = AllocateNameDictionary(
+ num_properties, AllocationFlag::kAllowLargeObjectAllocation);
}
TNode<JSObject> group_object = AllocateJSObjectFromMap(map, properties);
@@ -338,10 +339,10 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TVARIABLE(IntPtrT, var_i, IntPtrZero());
- Label loop(this, &var_i);
+ Label inner_loop(this, &var_i);
- Goto(&loop);
- BIND(&loop);
+ Goto(&inner_loop);
+ BIND(&inner_loop);
{
TNode<IntPtrT> i = var_i.value();
TNode<IntPtrT> i_plus_1 = IntPtrAdd(i, IntPtrConstant(1));
@@ -371,7 +372,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
var_i = i_plus_2;
Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length),
- &maybe_build_indices, &loop);
+ &maybe_build_indices, &inner_loop);
BIND(&add_dictionary_property_slow);
// If the dictionary needs resizing, the above Add call will jump here
@@ -436,8 +437,6 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
// External constants.
TNode<ExternalReference> isolate_address =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- TNode<ExternalReference> regexp_stack_memory_top_address = ExternalConstant(
- ExternalReference::address_of_regexp_stack_memory_top_address(isolate()));
TNode<ExternalReference> static_offsets_vector_address = ExternalConstant(
ExternalReference::address_of_static_offsets_vector(isolate()));
@@ -448,8 +447,8 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
Label if_failure(this);
- CSA_ASSERT(this, IsNumberNormalized(last_index));
- CSA_ASSERT(this, IsNumberPositive(last_index));
+ CSA_DCHECK(this, IsNumberNormalized(last_index));
+ CSA_DCHECK(this, IsNumberPositive(last_index));
GotoIf(TaggedIsNotSmi(last_index), &if_failure);
TNode<IntPtrT> int_string_length = LoadStringLengthAsWord(string);
@@ -546,7 +545,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
{
Label next(this);
GotoIfNot(TaggedIsSmi(var_code.value()), &next);
- CSA_ASSERT(this, SmiEqual(CAST(var_code.value()),
+ CSA_DCHECK(this, SmiEqual(CAST(var_code.value()),
SmiConstant(JSRegExp::kUninitializedValue)));
Goto(&next);
BIND(&next);
@@ -606,26 +605,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
MachineType arg5_type = type_int32;
TNode<Int32T> arg5 = SmiToInt32(register_count);
- // Argument 6: Start (high end) of backtracking stack memory area. This
- // argument is ignored in the interpreter.
- TNode<RawPtrT> stack_top = UncheckedCast<RawPtrT>(
- Load(MachineType::Pointer(), regexp_stack_memory_top_address));
-
- MachineType arg6_type = type_ptr;
- TNode<RawPtrT> arg6 = stack_top;
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- MachineType arg7_type = type_int32;
- TNode<Int32T> arg7 = Int32Constant(RegExp::CallOrigin::kFromJs);
+ // Argument 6: Indicate that this is a direct call from JavaScript.
+ MachineType arg6_type = type_int32;
+ TNode<Int32T> arg6 = Int32Constant(RegExp::CallOrigin::kFromJs);
- // Argument 8: Pass current isolate address.
- MachineType arg8_type = type_ptr;
- TNode<ExternalReference> arg8 = isolate_address;
+ // Argument 7: Pass current isolate address.
+ MachineType arg7_type = type_ptr;
+ TNode<ExternalReference> arg7 = isolate_address;
- // Argument 9: Regular expression object. This argument is ignored in native
+ // Argument 8: Regular expression object. This argument is ignored in native
// irregexp code.
- MachineType arg9_type = type_tagged;
- TNode<JSRegExp> arg9 = regexp;
+ MachineType arg8_type = type_tagged;
+ TNode<JSRegExp> arg8 = regexp;
// TODO(v8:11880): avoid roundtrips between cdc and code.
TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
@@ -640,8 +631,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
std::make_pair(arg1_type, arg1), std::make_pair(arg2_type, arg2),
std::make_pair(arg3_type, arg3), std::make_pair(arg4_type, arg4),
std::make_pair(arg5_type, arg5), std::make_pair(arg6_type, arg6),
- std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8),
- std::make_pair(arg9_type, arg9)));
+ std::make_pair(arg7_type, arg7), std::make_pair(arg8_type, arg8)));
// Check the result.
// We expect exactly one result since we force the called regexp to behave
@@ -661,7 +651,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
IntPtrConstant(RegExp::kInternalRegExpFallbackToExperimental)),
&retry_experimental);
- CSA_ASSERT(this, IntPtrEqual(int_result,
+ CSA_DCHECK(this, IntPtrEqual(int_result,
IntPtrConstant(RegExp::kInternalRegExpRetry)));
Goto(&runtime);
}
@@ -738,7 +728,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TNode<ExternalReference> pending_exception_address =
ExternalConstant(ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate()));
- CSA_ASSERT(this, IsTheHole(Load<Object>(pending_exception_address)));
+ CSA_DCHECK(this, IsTheHole(Load<Object>(pending_exception_address)));
#endif // DEBUG
CallRuntime(Runtime::kThrowStackOverflow, context);
Unreachable();
@@ -811,7 +801,7 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(
TNode<Context> context, TNode<Object> object) {
- CSA_ASSERT(this, TaggedIsNotSmi(object));
+ CSA_DCHECK(this, TaggedIsNotSmi(object));
return IsFastRegExpNoPrototype(context, object, LoadMap(CAST(object)));
}
@@ -820,7 +810,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
PrototypeCheckAssembler::Flags prototype_check_flags,
base::Optional<DescriptorIndexNameValue> additional_property_to_check,
Label* if_isunmodified, Label* if_ismodified) {
- CSA_ASSERT(this, TaggedEqual(LoadMap(object), map));
+ CSA_DCHECK(this, TaggedEqual(LoadMap(object), map));
GotoIfForceSlowPath(if_ismodified);
@@ -942,16 +932,16 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
auto match_info = Parameter<FixedArray>(Descriptor::kMatchInfo);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(last_index));
TNode<FixedArray> data = CAST(LoadObjectField(regexp, JSRegExp::kDataOffset));
- CSA_ASSERT(
+ CSA_DCHECK(
this,
SmiEqual(CAST(UnsafeLoadFixedArrayElement(data, JSRegExp::kTagIndex)),
SmiConstant(JSRegExp::ATOM)));
// Callers ensure that last_index is in-bounds.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
UintPtrLessThanOrEqual(SmiUntag(last_index),
LoadStringLengthAsWord(subject_string)));
@@ -963,7 +953,7 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
//
// This is especially relevant for crbug.com/1075514: atom patterns are
// non-empty and thus guaranteed not to match at the end of the string.
- CSA_ASSERT(this, IntPtrGreaterThan(LoadStringLengthAsWord(needle_string),
+ CSA_DCHECK(this, IntPtrGreaterThan(LoadStringLengthAsWord(needle_string),
IntPtrConstant(0)));
const TNode<Smi> match_from =
@@ -975,8 +965,8 @@ TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
BIND(&if_success);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(match_from));
- CSA_ASSERT(this, UintPtrLessThan(SmiUntag(match_from),
+ CSA_DCHECK(this, TaggedIsPositiveSmi(match_from));
+ CSA_DCHECK(this, UintPtrLessThan(SmiUntag(match_from),
LoadStringLengthAsWord(subject_string)));
const int kNumRegisters = 2;
@@ -1011,8 +1001,8 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
auto match_info = Parameter<RegExpMatchInfo>(Descriptor::kMatchInfo);
auto context = Parameter<Context>(Descriptor::kContext);
- CSA_ASSERT(this, IsNumberNormalized(last_index));
- CSA_ASSERT(this, IsNumberPositive(last_index));
+ CSA_DCHECK(this, IsNumberNormalized(last_index));
+ CSA_DCHECK(this, IsNumberPositive(last_index));
Return(RegExpExecInternal(context, regexp, string, last_index, match_info));
}
@@ -1037,7 +1027,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
if (is_fastpath) {
// Refer to JSRegExp's flag property on the fast-path.
- CSA_ASSERT(this, IsJSRegExp(CAST(regexp)));
+ CSA_DCHECK(this, IsJSRegExp(CAST(regexp)));
const TNode<Smi> flags_smi =
CAST(LoadObjectField(CAST(regexp), JSRegExp::kFlagsOffset));
var_flags = SmiUntag(flags_smi);
@@ -1321,12 +1311,12 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
// {maybe_flags} must be undefined in this case, otherwise throw.
{
- Label next(this);
- GotoIf(IsUndefined(maybe_flags), &next);
+ Label maybe_flags_is_undefined(this);
+ GotoIf(IsUndefined(maybe_flags), &maybe_flags_is_undefined);
ThrowTypeError(context, MessageTemplate::kRegExpFlags);
- BIND(&next);
+ BIND(&maybe_flags_is_undefined);
}
const TNode<JSRegExp> pattern = CAST(maybe_pattern);
@@ -1412,8 +1402,8 @@ TNode<BoolT> RegExpBuiltinsAssembler::FlagGetter(TNode<Context> context,
TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
TNode<String> string, TNode<Number> index, TNode<BoolT> is_unicode,
bool is_fastpath) {
- CSA_ASSERT(this, IsNumberNormalized(index));
- if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
+ CSA_DCHECK(this, IsNumberNormalized(index));
+ if (is_fastpath) CSA_DCHECK(this, TaggedIsPositiveSmi(index));
// Default to last_index + 1.
// TODO(pwong): Consider using TrySmiAdd for the fast path to reduce generated
@@ -1437,7 +1427,7 @@ TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
// Must be in Smi range on the fast path. We control the value of {index}
// on all call-sites and can never exceed the length of the string.
STATIC_ASSERT(String::kMaxLength + 2 < Smi::kMaxValue);
- CSA_ASSERT(this, TaggedIsPositiveSmi(index_plus_one));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(index_plus_one));
}
Label if_isunicode(this), out(this);
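
The fast path asserts that the incremented index is still a positive Smi; the unicode branch below then widens the step to two code units when a surrogate pair starts at the index. A rough stand-alone C++ sketch of that advancing rule, assuming UTF-16 code units (this is not V8's actual helper):

  #include <cstdint>
  #include <string>

  // Advance by two code units when the /u flag is set and the units at
  // `index` form a lead/trail surrogate pair; otherwise by one.
  uint64_t AdvanceStringIndexSketch(const std::u16string& s, uint64_t index,
                                    bool is_unicode) {
    uint64_t next = index + 1;  // Default to last_index + 1.
    if (is_unicode && index + 1 < s.size()) {
      char16_t lead = s[index];
      char16_t trail = s[index + 1];
      if (lead >= 0xD800 && lead < 0xDC00 && trail >= 0xDC00 && trail < 0xE000) {
        next = index + 2;  // Skip the whole surrogate pair.
      }
    }
    return next;
  }
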
@@ -1524,8 +1514,8 @@ TNode<Object> RegExpMatchAllAssembler::CreateRegExpStringIterator(
TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
const TNode<Smi> limit) {
- CSA_ASSERT(this, IsFastRegExpPermissive(context, regexp));
- CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
+ CSA_DCHECK(this, IsFastRegExpPermissive(context, regexp));
+ CSA_DCHECK(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
const TNode<IntPtrT> int_limit = SmiUntag(limit);
@@ -1630,7 +1620,7 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
match_indices, RegExpMatchInfo::kFirstCaptureIndex));
const TNode<Smi> match_to = CAST(UnsafeLoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1));
- CSA_ASSERT(this, SmiNotEqual(match_from, string_length));
+ CSA_DCHECK(this, SmiNotEqual(match_from, string_length));
// Advance index and continue if the match is empty.
{
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index ff0b5d4722..154c6d39f8 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -139,9 +139,9 @@ void SharedArrayBufferBuiltinsAssembler::DebugCheckAtomicIndex(
//
// This function must always be called after ValidateIntegerTypedArray, which
// will ensure that LoadJSArrayBufferViewBuffer will not be null.
- CSA_ASSERT(this, Word32BinaryNot(
+ CSA_DCHECK(this, Word32BinaryNot(
IsDetachedBuffer(LoadJSArrayBufferViewBuffer(array))));
- CSA_ASSERT(this, UintPtrLessThan(index, LoadJSTypedArrayLength(array)));
+ CSA_DCHECK(this, UintPtrLessThan(index, LoadJSTypedArrayLength(array)));
}
TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 0ce2fd0f17..ceee7b0b94 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -167,8 +167,8 @@ void StringBuiltinsAssembler::StringEqual_Core(
TNode<String> lhs, TNode<Word32T> lhs_instance_type, TNode<String> rhs,
TNode<Word32T> rhs_instance_type, TNode<IntPtrT> length, Label* if_equal,
Label* if_not_equal, Label* if_indirect) {
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(lhs), length));
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(lhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Fast check to see if {lhs} and {rhs} refer to the same String object.
GotoIf(TaggedEqual(lhs, rhs), if_equal);
@@ -244,8 +244,8 @@ void StringBuiltinsAssembler::StringEqual_Loop(
TNode<String> lhs, TNode<Word32T> lhs_instance_type, MachineType lhs_type,
TNode<String> rhs, TNode<Word32T> rhs_instance_type, MachineType rhs_type,
TNode<IntPtrT> length, Label* if_equal, Label* if_not_equal) {
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(lhs), length));
- CSA_ASSERT(this, WordEqual(LoadStringLengthAsWord(rhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(lhs), length));
+ CSA_DCHECK(this, WordEqual(LoadStringLengthAsWord(rhs), length));
// Compute the effective offset of the first character.
TNode<RawPtrT> lhs_data = DirectStringData(lhs, lhs_instance_type);
@@ -341,7 +341,7 @@ TNode<String> StringBuiltinsAssembler::AllocateConsString(TNode<Uint32T> length,
TNode<String> StringBuiltinsAssembler::StringAdd(
TNode<ContextOrEmptyContext> context, TNode<String> left,
TNode<String> right) {
- CSA_ASSERT(this, IsZeroOrContext(context));
+ CSA_DCHECK(this, IsZeroOrContext(context));
TVARIABLE(String, result);
Label check_right(this), runtime(this, Label::kDeferred), cons(this),
@@ -540,7 +540,7 @@ TF_BUILTIN(StringAdd_CheckNone, StringBuiltinsAssembler) {
auto right = Parameter<String>(Descriptor::kRight);
TNode<ContextOrEmptyContext> context =
UncheckedParameter<ContextOrEmptyContext>(Descriptor::kContext);
- CSA_ASSERT(this, IsZeroOrContext(context));
+ CSA_DCHECK(this, IsZeroOrContext(context));
Return(StringAdd(context, left, right));
}
@@ -965,8 +965,8 @@ TNode<String> StringBuiltinsAssembler::GetSubstitution(
TNode<Context> context, TNode<String> subject_string,
TNode<Smi> match_start_index, TNode<Smi> match_end_index,
TNode<String> replace_string) {
- CSA_ASSERT(this, TaggedIsPositiveSmi(match_start_index));
- CSA_ASSERT(this, TaggedIsPositiveSmi(match_end_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(match_start_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(match_end_index));
TVARIABLE(String, var_result, replace_string);
Label runtime(this), out(this);
@@ -984,7 +984,7 @@ TNode<String> StringBuiltinsAssembler::GetSubstitution(
BIND(&runtime);
{
- CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(dollar_index));
const TNode<Object> matched =
CallBuiltin(Builtin::kStringSubstring, context, subject_string,
@@ -1185,8 +1185,8 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
// TypeError exception.
GotoIf(TaggedIsSmi(maybe_regexp), &next);
TNode<HeapObject> heap_maybe_regexp = CAST(maybe_regexp);
- regexp_asm.BranchIfFastRegExp_Strict(context, heap_maybe_regexp, &fast,
- &slow);
+ regexp_asm.BranchIfFastRegExpForMatch(context, heap_maybe_regexp, &fast,
+ &slow);
BIND(&fast);
{
@@ -1260,7 +1260,7 @@ TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<NativeContext> context, TNode<String> subject_string,
TNode<Smi> subject_length, TNode<Number> limit_number) {
- CSA_ASSERT(this, SmiGreaterThan(subject_length, SmiConstant(0)));
+ CSA_DCHECK(this, SmiGreaterThan(subject_length, SmiConstant(0)));
Label done(this), call_runtime(this, Label::kDeferred),
fill_thehole_and_call_runtime(this, Label::kDeferred);
@@ -1299,7 +1299,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
// TODO(jkummerow): Implement a CSA version of
// DisallowGarbageCollection and use that to guard
// ToDirectStringAssembler.PointerToData().
- CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime),
+ CSA_DCHECK(this, WordEqual(to_direct.PointerToData(&call_runtime),
string_data));
TNode<Int32T> char_code =
UncheckedCast<Int32T>(Load(MachineType::Uint8(), string_data,
@@ -1434,9 +1434,10 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
TNode<Smi> length = smi_zero;
TNode<IntPtrT> capacity = IntPtrConstant(0);
- TNode<JSArray> result = AllocateJSArray(kind, array_map, capacity, length);
+ TNode<JSArray> result_array =
+ AllocateJSArray(kind, array_map, capacity, length);
- args.PopAndReturn(result);
+ args.PopAndReturn(result_array);
}
}
@@ -1478,12 +1479,12 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
TNode<Int32T> trail = var_trail.value();
// Check that this path is only taken if a surrogate pair is found
- CSA_SLOW_ASSERT(this,
+ CSA_SLOW_DCHECK(this,
Uint32GreaterThanOrEqual(lead, Int32Constant(0xD800)));
- CSA_SLOW_ASSERT(this, Uint32LessThan(lead, Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(this,
+ CSA_SLOW_DCHECK(this, Uint32LessThan(lead, Int32Constant(0xDC00)));
+ CSA_SLOW_DCHECK(this,
Uint32GreaterThanOrEqual(trail, Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(this, Uint32LessThan(trail, Int32Constant(0xE000)));
+ CSA_SLOW_DCHECK(this, Uint32LessThan(trail, Int32Constant(0xE000)));
switch (encoding) {
case UnicodeEncoding::UTF16:
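
The four slow dchecks pin the lead unit to [0xD800, 0xDC00) and the trail unit to [0xDC00, 0xE000). For reference, a small sketch of those ranges and of the standard way such a pair combines into a code point (the UTF32 branch of this macro), assuming ordinary UTF-16 encoding:

  #include <cstdint>

  constexpr bool IsLeadSurrogate(uint32_t u) { return u >= 0xD800 && u < 0xDC00; }
  constexpr bool IsTrailSurrogate(uint32_t u) { return u >= 0xDC00 && u < 0xE000; }

  // 0x10000 is the offset of the supplementary planes.
  constexpr uint32_t CombineSurrogatePair(uint32_t lead, uint32_t trail) {
    return ((lead - 0xD800) << 10) + (trail - 0xDC00) + 0x10000;
  }
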
@@ -1757,7 +1758,7 @@ TNode<String> StringBuiltinsAssembler::SubString(TNode<String> string,
BIND(&original_string_or_invalid_length);
{
- CSA_ASSERT(this, IntPtrEqual(substr_length, string_length));
+ CSA_DCHECK(this, IntPtrEqual(substr_length, string_length));
// Equal length - check if {from, to} == {0, str.length}.
GotoIf(UintPtrGreaterThan(from, IntPtrConstant(0)), &runtime);
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 950cefd7ba..d94976bab2 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -140,21 +140,25 @@ BUILTIN(StringPrototypeLocaleCompare) {
HandleScope handle_scope(isolate);
isolate->CountUsage(v8::Isolate::UseCounterFeature::kStringLocaleCompare);
- const char* method = "String.prototype.localeCompare";
+ static const char* const kMethod = "String.prototype.localeCompare";
#ifdef V8_INTL_SUPPORT
- TO_THIS_STRING(str1, method);
+ TO_THIS_STRING(str1, kMethod);
Handle<String> str2;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, str2, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
- RETURN_RESULT_OR_FAILURE(
- isolate, Intl::StringLocaleCompare(
- isolate, str1, str2, args.atOrUndefined(isolate, 2),
- args.atOrUndefined(isolate, 3), method));
+ base::Optional<int> result = Intl::StringLocaleCompare(
+ isolate, str1, str2, args.atOrUndefined(isolate, 2),
+ args.atOrUndefined(isolate, 3), kMethod);
+ if (!result.has_value()) {
+ DCHECK(isolate->has_pending_exception());
+ return ReadOnlyRoots(isolate).exception();
+ }
+ return Smi::FromInt(result.value());
#else
DCHECK_LE(2, args.length());
- TO_THIS_STRING(str1, method);
+ TO_THIS_STRING(str1, kMethod);
Handle<String> str2;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2,
Object::ToString(isolate, args.at(1)));
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index 663ba86cdb..ab2cf2696d 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -90,8 +90,8 @@ transitioning builtin StringToList(implicit context: Context)(string: String):
i = i + value.length_intptr;
arrayLength++;
}
- assert(arrayLength >= 0);
- assert(SmiTag(stringLength) >= arrayLength);
+ dcheck(arrayLength >= 0);
+ dcheck(SmiTag(stringLength) >= arrayLength);
array.length = arrayLength;
return array;
@@ -121,7 +121,7 @@ IfInBounds(String, uintptr, uintptr), IfOutOfBounds {
goto IfInBounds(string, index, length);
}
case (indexHeapNumber: HeapNumber): {
- assert(IsNumberNormalized(indexHeapNumber));
+ dcheck(IsNumberNormalized(indexHeapNumber));
// Valid string indices fit into Smi range, so HeapNumber index is
// definitely an out of bounds case.
goto IfOutOfBounds;
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 0fd0c32340..60f26c63dc 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -154,13 +154,13 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
// Default to zero if the {receiver}s buffer was detached / out of bounds.
- Label detached_or_oob(this), not_detached_or_oob(this);
- IsTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
- &not_detached_or_oob);
+ Label detached_or_oob(this), not_detached_nor_oob(this);
+ IsJSTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
+ &not_detached_nor_oob);
BIND(&detached_or_oob);
Return(ChangeUintPtrToTagged(UintPtrConstant(0)));
- BIND(&not_detached_or_oob);
+ BIND(&not_detached_nor_oob);
Return(
ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(CAST(receiver))));
}
@@ -192,7 +192,10 @@ TNode<BoolT> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
TNode<BoolT> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
TNode<Int32T> kind) {
STATIC_ASSERT(BIGUINT64_ELEMENTS + 1 == BIGINT64_ELEMENTS);
- return IsElementsKindInRange(kind, BIGUINT64_ELEMENTS, BIGINT64_ELEMENTS);
+ return Word32Or(
+ IsElementsKindInRange(kind, BIGUINT64_ELEMENTS, BIGINT64_ELEMENTS),
+ IsElementsKindInRange(kind, RAB_GSAB_BIGUINT64_ELEMENTS,
+ RAB_GSAB_BIGINT64_ELEMENTS));
}
TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
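
The STATIC_ASSERT keeps BIGUINT64 and BIGINT64 adjacent so each half of the new Word32Or can remain a single closed-range test. A generic sketch of that single-comparison range idiom (illustrative only, not V8's IsElementsKindInRange):

  #include <cstdint>

  // Closed-range membership in one unsigned comparison: values below `lo`
  // wrap to large unsigned numbers, so only lo <= value <= hi passes.
  constexpr bool InRange(int32_t value, int32_t lo, int32_t hi) {
    return static_cast<uint32_t>(value - lo) <= static_cast<uint32_t>(hi - lo);
  }

  static_assert(InRange(5, 4, 6) && !InRange(3, 4, 6), "sanity check");
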
@@ -255,9 +258,27 @@ TNode<JSTypedArray> TypedArrayBuiltinsAssembler::ValidateTypedArray(
// If the typed array's buffer is detached, throw
ThrowIfArrayBufferViewBufferIsDetached(context, CAST(obj), method_name);
+ // TODO(v8:11111): Throw if the RAB / GSAB is OOB.
return CAST(obj);
}
+TNode<UintPtrT> TypedArrayBuiltinsAssembler::ValidateTypedArrayAndGetLength(
+ TNode<Context> context, TNode<Object> obj, const char* method_name) {
+ // If it is not a typed array, throw
+ ThrowIfNotInstanceType(context, obj, JS_TYPED_ARRAY_TYPE, method_name);
+
+ Label detached_or_oob(this), not_detached_nor_oob(this);
+ TNode<UintPtrT> length =
+ LoadJSTypedArrayLengthAndCheckDetached(CAST(obj), &detached_or_oob);
+ Goto(&not_detached_nor_oob);
+
+ BIND(&detached_or_oob);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+
+ BIND(&not_detached_nor_oob);
+ return length;
+}
+
void TypedArrayBuiltinsAssembler::CallCMemmove(TNode<RawPtrT> dest_ptr,
TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length) {
@@ -317,7 +338,7 @@ void TypedArrayBuiltinsAssembler::
CallCCopyFastNumberJSArrayElementsToTypedArray(
TNode<Context> context, TNode<JSArray> source, TNode<JSTypedArray> dest,
TNode<UintPtrT> source_length, TNode<UintPtrT> offset) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(IsBigInt64ElementsKind(LoadElementsKind(dest))));
TNode<ExternalReference> f = ExternalConstant(
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array());
@@ -388,13 +409,14 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- BIND(&if_##type##array); \
- { \
- case_function(TYPE##_ELEMENTS, sizeof(ctype), 0); \
- Goto(&next); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, NON_RAB_GSAB_TYPE) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, sizeof(ctype), \
+ Context::NON_RAB_GSAB_TYPE##_ARRAY_FUN_INDEX); \
+ Goto(&next); \
}
- RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
BIND(&if_unknown_type);
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index a309f67286..2807745ecb 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -49,6 +49,10 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> obj,
const char* method_name);
+ TNode<UintPtrT> ValidateTypedArrayAndGetLength(TNode<Context> context,
+ TNode<Object> obj,
+ const char* method_name);
+
void CallCMemmove(TNode<RawPtrT> dest_ptr, TNode<RawPtrT> src_ptr,
TNode<UintPtrT> byte_length);
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index d6be81615d..a7827e7d9f 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -47,11 +47,12 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.copyWithin";
+ const char* method_name = "%TypedArray%.prototype.copyWithin";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
- int64_t len = array->length();
+ int64_t len = array->GetLength();
int64_t to = 0;
int64_t from = 0;
int64_t final = len;
@@ -80,11 +81,37 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
if (count <= 0) return *array;
// TypedArray buffer may have been transferred/detached during parameter
- // processing above. Return early in this case, to prevent potential UAF error
- // TODO(caitp): throw here, as though the full algorithm were performed (the
- // throw would have come from ecma262/#sec-integerindexedelementget)
- // (see )
- if (V8_UNLIKELY(array->WasDetached())) return *array;
+ // processing above.
+ if (V8_UNLIKELY(array->WasDetached())) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
+ }
+
+ if (V8_UNLIKELY(array->is_backed_by_rab())) {
+ bool out_of_bounds = false;
+ int64_t new_len = array->GetLengthOrOutOfBounds(out_of_bounds);
+ if (out_of_bounds) {
+ const MessageTemplate message = MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation));
+ }
+ if (new_len < len) {
+ // We don't need to account for growing, since we only copy an already
+ // determined number of elements and growing won't change it. If to >
+ // new_len or from > new_len, the count below will be < 0, so we don't
+ // need to check them separately.
+ if (final > new_len) {
+ final = new_len;
+ }
+ count = std::min<int64_t>(final - from, new_len - to);
+ if (count <= 0) {
+ return *array;
+ }
+ }
+ }
// Ensure processed indexes are within array bounds
DCHECK_GE(from, 0);
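
For a resizable buffer that shrank while the arguments were being coerced, the hunk above caps `final` to the new length and recomputes the element count; growing needs no handling because the count was already fixed. A condensed restatement of that arithmetic as a hypothetical helper, using the same clamping as the code above:

  #include <algorithm>
  #include <cstdint>

  // If the backing buffer shrank to new_len, cap the copy window and recompute
  // the count; a non-positive result means the caller copies nothing.
  int64_t RecomputeCount(int64_t from, int64_t to, int64_t final, int64_t new_len) {
    if (final > new_len) final = new_len;
    return std::min(final - from, new_len - to);
  }
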
@@ -113,9 +140,10 @@ BUILTIN(TypedArrayPrototypeFill) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.fill";
+ const char* method_name = "%TypedArray%.prototype.fill";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
ElementsKind kind = array->GetElementsKind();
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
@@ -147,17 +175,22 @@ BUILTIN(TypedArrayPrototypeFill) {
}
}
+ if (V8_UNLIKELY(array->WasDetached())) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ method_name)));
+ }
+
if (V8_UNLIKELY(array->IsVariableLength())) {
bool out_of_bounds = false;
array->GetLengthOrOutOfBounds(out_of_bounds);
if (out_of_bounds) {
const MessageTemplate message = MessageTemplate::kDetachedOperation;
Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(method);
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation));
}
- } else if (V8_UNLIKELY(array->WasDetached())) {
- return *array;
}
int64_t count = end - start;
@@ -178,9 +211,10 @@ BUILTIN(TypedArrayPrototypeIncludes) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.includes";
+ const char* method_name = "%TypedArray%.prototype.includes";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
if (args.length() < 2) return ReadOnlyRoots(isolate).false_value();
@@ -195,10 +229,6 @@ BUILTIN(TypedArrayPrototypeIncludes) {
index = CapRelativeIndex(num, 0, len);
}
- // TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
- if (V8_UNLIKELY(array->WasDetached()))
- return ReadOnlyRoots(isolate).false_value();
-
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
Maybe<bool> result =
@@ -211,9 +241,10 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.indexOf";
+ const char* method_name = "%TypedArray%.prototype.indexOf";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
int64_t len = array->length();
if (len == 0) return Smi::FromInt(-1);
@@ -226,7 +257,6 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
index = CapRelativeIndex(num, 0, len);
}
- // TODO(cwhan.tunz): throw. See the above comment in CopyWithin.
if (V8_UNLIKELY(array->WasDetached())) return Smi::FromInt(-1);
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
@@ -241,9 +271,10 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.lastIndexOf";
+ const char* method_name = "%TypedArray%.prototype.lastIndexOf";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
int64_t len = array->length();
if (len == 0) return Smi::FromInt(-1);
@@ -275,9 +306,10 @@ BUILTIN(TypedArrayPrototypeReverse) {
HandleScope scope(isolate);
Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.reverse";
+ const char* method_name = "%TypedArray%.prototype.reverse";
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
+ isolate, array,
+ JSTypedArray::Validate(isolate, args.receiver(), method_name));
ElementsAccessor* elements = array->GetElementsAccessor();
elements->Reverse(*array);
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index c01e2dceb1..d0045b43d5 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -112,9 +112,9 @@ const char* Builtins::Lookup(Address pc) {
// May be called during initialization (disassembler).
if (!initialized_) return nullptr;
- for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
- ++builtin) {
- if (code(builtin).contains(isolate_, pc)) return name(builtin);
+ for (Builtin builtin_ix = Builtins::kFirst; builtin_ix <= Builtins::kLast;
+ ++builtin_ix) {
+ if (code(builtin_ix).contains(isolate_, pc)) return name(builtin_ix);
}
return nullptr;
}
@@ -358,7 +358,7 @@ class OffHeapTrampolineGenerator {
// Generate replacement code that simply tail-calls the off-heap code.
DCHECK(!masm_.has_frame());
{
- FrameScope scope(&masm_, StackFrame::NONE);
+ FrameScope scope(&masm_, StackFrame::NO_FRAME_TYPE);
if (type == TrampolineType::kJump) {
masm_.CodeEntry();
masm_.JumpToInstructionStream(off_heap_entry);
diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq
index b12ea5d9fe..c53c970f9c 100644
--- a/deps/v8/src/builtins/cast.tq
+++ b/deps/v8/src/builtins/cast.tq
@@ -793,7 +793,7 @@ macro Is<A : type extends Object, B : type extends Object>(
macro UnsafeCast<A : type extends Object>(implicit context: Context)(o: Object):
A {
- assert(Is<A>(o));
+ dcheck(Is<A>(o));
return %RawDownCast<A>(o);
}
@@ -803,12 +803,12 @@ macro UnsafeConstCast<T: type>(r: const &T):&T {
UnsafeCast<RegExpMatchInfo>(implicit context: Context)(o: Object):
RegExpMatchInfo {
- assert(Is<FixedArray>(o));
+ dcheck(Is<FixedArray>(o));
return %RawDownCast<RegExpMatchInfo>(o);
}
macro UnsafeCast<A : type extends WeakHeapObject>(o: A|Object): A {
- assert(IsWeakOrCleared(o));
+ dcheck(IsWeakOrCleared(o));
return %RawDownCast<A>(o);
}
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index 2849b782c8..6a3c157db8 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -29,7 +29,7 @@ FromConstexpr<Smi, constexpr int31>(i: constexpr int31): Smi {
return %FromConstexpr<Smi>(i);
}
FromConstexpr<PositiveSmi, constexpr int31>(i: constexpr int31): PositiveSmi {
- assert(i >= 0);
+ dcheck(i >= 0);
return %FromConstexpr<PositiveSmi>(i);
}
FromConstexpr<String, constexpr string>(s: constexpr string): String {
@@ -232,11 +232,11 @@ Convert<TaggedIndex, intptr>(i: intptr): TaggedIndex {
}
Convert<intptr, uintptr>(ui: uintptr): intptr {
const i = Signed(ui);
- assert(i >= 0);
+ dcheck(i >= 0);
return i;
}
Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi {
- assert(IsValidPositiveSmi(i));
+ dcheck(IsValidPositiveSmi(i));
return %RawDownCast<PositiveSmi>(SmiTag(i));
}
Convert<PositiveSmi, uintptr>(ui: uintptr): PositiveSmi labels IfOverflow {
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 5f61a19472..4acc13b223 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -6,6 +6,11 @@
namespace data_view {
+const kBuiltinNameByteLength: constexpr string =
+ 'DataView.prototype.byteLength';
+const kBuiltinNameByteOffset: constexpr string =
+ 'DataView.prototype.byteOffset';
+
macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
return 'DataView.prototype.getUint8';
@@ -85,9 +90,7 @@ javascript builtin DataViewPrototypeGetByteLength(
const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_length');
if (WasDetached(dataView)) {
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {dataView} was detached.
- return 0;
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteLength);
}
return Convert<Number>(dataView.byte_length);
}
@@ -98,9 +101,7 @@ javascript builtin DataViewPrototypeGetByteOffset(
const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_offset');
if (WasDetached(dataView)) {
- // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
- // here if the JSArrayBuffer of the {dataView} was detached.
- return 0;
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteOffset);
}
return Convert<Number>(dataView.byte_offset);
}
@@ -513,13 +514,14 @@ extern macro TruncateFloat64ToWord32(float64): uint32;
extern macro DataViewBuiltinsAssembler::StoreWord8(
RawPtr, uintptr, uint32): void;
-macro StoreDataView8(buffer: JSArrayBuffer, offset: uintptr, value: uint32) {
+macro StoreDataView8(
+ buffer: JSArrayBuffer, offset: uintptr, value: uint32): void {
StoreWord8(buffer.backing_store_ptr, offset, value & 0xFF);
}
macro StoreDataView16(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const dataPointer: RawPtr = buffer.backing_store_ptr;
const b0: uint32 = value & 0xFF;
@@ -536,7 +538,7 @@ macro StoreDataView16(
macro StoreDataView32(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const dataPointer: RawPtr = buffer.backing_store_ptr;
const b0: uint32 = value & 0xFF;
@@ -559,7 +561,7 @@ macro StoreDataView32(
macro StoreDataView64(
buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const dataPointer: RawPtr = buffer.backing_store_ptr;
const b0: uint32 = lowWord & 0xFF;
@@ -603,7 +605,7 @@ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase):
// on 64-bit platforms, and the 2 lowest BigInt digits on 32-bit ones.
macro StoreDataViewBigInt(
buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
- requestedLittleEndian: bool) {
+ requestedLittleEndian: bool): void {
const length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
diff --git a/deps/v8/src/builtins/finalization-registry.tq b/deps/v8/src/builtins/finalization-registry.tq
index 389b9a5ce0..72db154a6f 100644
--- a/deps/v8/src/builtins/finalization-registry.tq
+++ b/deps/v8/src/builtins/finalization-registry.tq
@@ -22,7 +22,7 @@ macro SplitOffTail(weakCell: WeakCell): WeakCell|Undefined {
case (Undefined): {
}
case (tailIsNowAHead: WeakCell): {
- assert(tailIsNowAHead.prev == weakCell);
+ dcheck(tailIsNowAHead.prev == weakCell);
tailIsNowAHead.prev = Undefined;
}
}
@@ -37,7 +37,7 @@ PopClearedCell(finalizationRegistry: JSFinalizationRegistry): WeakCell|
return Undefined;
}
case (weakCell: WeakCell): {
- assert(weakCell.prev == Undefined);
+ dcheck(weakCell.prev == Undefined);
finalizationRegistry.cleared_cells = SplitOffTail(weakCell);
// If the WeakCell has an unregister token, remove the cell from the
@@ -55,7 +55,7 @@ PopClearedCell(finalizationRegistry: JSFinalizationRegistry): WeakCell|
}
transitioning macro PushCell(
- finalizationRegistry: JSFinalizationRegistry, cell: WeakCell) {
+ finalizationRegistry: JSFinalizationRegistry, cell: WeakCell): void {
cell.next = finalizationRegistry.active_cells;
typeswitch (finalizationRegistry.active_cells) {
case (Undefined): {
@@ -69,7 +69,7 @@ transitioning macro PushCell(
transitioning macro
FinalizationRegistryCleanupLoop(implicit context: Context)(
- finalizationRegistry: JSFinalizationRegistry, callback: Callable) {
+ finalizationRegistry: JSFinalizationRegistry, callback: Callable): void {
while (true) {
const weakCellHead = PopClearedCell(finalizationRegistry);
typeswitch (weakCellHead) {
@@ -118,9 +118,9 @@ FinalizationRegistryConstructor(
finalizationRegistry.flags =
SmiTag(FinalizationRegistryFlags{scheduled_for_cleanup: false});
// 7. Set finalizationRegistry.[[Cells]] to be an empty List.
- assert(finalizationRegistry.active_cells == Undefined);
- assert(finalizationRegistry.cleared_cells == Undefined);
- assert(finalizationRegistry.key_map == Undefined);
+ dcheck(finalizationRegistry.active_cells == Undefined);
+ dcheck(finalizationRegistry.cleared_cells == Undefined);
+ dcheck(finalizationRegistry.key_map == Undefined);
// 8. Return finalizationRegistry.
return finalizationRegistry;
}
diff --git a/deps/v8/src/builtins/frame-arguments.tq b/deps/v8/src/builtins/frame-arguments.tq
index 9dd26e2327..a877209b3e 100644
--- a/deps/v8/src/builtins/frame-arguments.tq
+++ b/deps/v8/src/builtins/frame-arguments.tq
@@ -39,7 +39,7 @@ struct FrameWithArgumentsInfo {
// This macro should only be used in builtins that can be called from
// interpreted or JITted code, not from CSA/Torque builtins (the number of
// returned formal parameters would be wrong).
-// It is difficult to actually check/assert this, since interpreted or JITted
+// It is difficult to actually check/dcheck this, since interpreted or JITted
// frames are StandardFrames, but so are hand-written builtins. Doing that
// more refined check would be prohibitively expensive.
macro GetFrameWithArgumentsInfo(implicit context: Context)():
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 3e959a094f..121c3bb3e1 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -21,7 +21,7 @@ FromConstexpr<FrameType, constexpr FrameType>(t: constexpr FrameType):
Cast<FrameType>(o: Object): FrameType
labels CastError {
if (TaggedIsNotSmi(o)) goto CastError;
- assert(
+ dcheck(
Convert<int32>(BitcastTaggedToWordForTagAndSmiBits(o)) <
Convert<int32>(kFrameTypeCount << kSmiTagSize));
return %RawDownCast<FrameType>(o);
diff --git a/deps/v8/src/builtins/function.tq b/deps/v8/src/builtins/function.tq
index 682fdce4ba..4bd134e25f 100644
--- a/deps/v8/src/builtins/function.tq
+++ b/deps/v8/src/builtins/function.tq
@@ -24,7 +24,8 @@ const kMinDescriptorsForFastBind:
constexpr int31 generates 'JSFunction::kMinDescriptorsForFastBind';
macro CheckAccessor(implicit context: Context)(
- array: DescriptorArray, index: constexpr int32, name: Name) labels Slow {
+ array: DescriptorArray, index: constexpr int32,
+ name: Name): void labels Slow {
const descriptor: DescriptorEntry = array.descriptors[index];
const key: Name|Undefined = descriptor.key;
if (!TaggedEqual(key, name)) goto Slow;
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index e242ced5c6..4644582292 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -67,7 +67,7 @@ TNode<JSArray> GrowableFixedArray::ToJSArray(const TNode<Context> context) {
TNode<IntPtrT> GrowableFixedArray::NewCapacity(
TNode<IntPtrT> current_capacity) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrGreaterThanOrEqual(current_capacity, IntPtrConstant(0)));
// Growth rate is analogous to JSObject::NewElementsCapacity:
@@ -82,9 +82,9 @@ TNode<IntPtrT> GrowableFixedArray::NewCapacity(
TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
const TNode<IntPtrT> element_count, const TNode<IntPtrT> new_capacity) {
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(element_count, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_capacity, element_count));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(element_count, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(new_capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(new_capacity, element_count));
const TNode<FixedArray> from_array = var_array_.value();
diff --git a/deps/v8/src/builtins/growable-fixed-array.tq b/deps/v8/src/builtins/growable-fixed-array.tq
index af9418b0c9..202422c0d2 100644
--- a/deps/v8/src/builtins/growable-fixed-array.tq
+++ b/deps/v8/src/builtins/growable-fixed-array.tq
@@ -5,19 +5,19 @@
namespace growable_fixed_array {
// TODO(pwong): Support FixedTypedArrays.
struct GrowableFixedArray {
- macro Push(obj: Object) {
+ macro Push(obj: Object): void {
this.EnsureCapacity();
this.array.objects[this.length++] = obj;
}
macro ResizeFixedArray(newCapacity: intptr): FixedArray {
- assert(this.length >= 0);
- assert(newCapacity >= 0);
- assert(newCapacity >= this.length);
+ dcheck(this.length >= 0);
+ dcheck(newCapacity >= 0);
+ dcheck(newCapacity >= this.length);
const first: intptr = 0;
return ExtractFixedArray(this.array, first, this.length, newCapacity);
}
- macro EnsureCapacity() {
- assert(this.length <= this.capacity);
+ macro EnsureCapacity(): void {
+ dcheck(this.length <= this.capacity);
if (this.capacity == this.length) {
// Growth rate is analogous to JSObject::NewElementsCapacity:
// new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
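
The growth formula quoted in the comment, written out as ordinary C++ for reference: roughly 1.5x growth plus a constant 16-slot headroom.

  #include <cstdint>

  // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
  int64_t NewElementsCapacity(int64_t current_capacity) {
    return current_capacity + (current_capacity >> 1) + 16;
  }
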
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 63aba94fe9..c140a2c812 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -2447,13 +2447,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
StackArgumentsAccessor args(eax);
__ AssertFunction(edi, edx);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
- Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ j(not_zero, &class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2534,13 +2528,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzx_w(
ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump);
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ push(edi);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2655,44 +2642,56 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- eax : the number of arguments
// -- edi : the target to call (can be any Object).
// -----------------------------------
- StackArgumentsAccessor args(eax);
+ Register argc = eax;
+ Register target = edi;
+ Register map = ecx;
+ Register instance_type = edx;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ StackArgumentsAccessor args(argc);
- Label non_callable, non_function, non_smi, non_jsfunction,
- non_jsboundfunction;
- __ JumpIfSmi(edi, &non_callable);
+ Label non_callable, non_smi, non_callable_jsfunction, non_jsboundfunction,
+ non_proxy, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
__ bind(&non_smi);
- __ LoadMap(ecx, edi);
- __ CmpInstanceTypeRange(ecx, ecx, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
- __ j(above, &non_jsfunction);
+ __ LoadMap(map, target);
+ __ CmpInstanceTypeRange(map, instance_type, map,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
+ __ j(above, &non_callable_jsfunction);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
- __ bind(&non_jsfunction);
- __ LoadMap(ecx, edi);
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ bind(&non_callable_jsfunction);
+ __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
__ j(not_equal, &non_jsboundfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET);
// Check if target is a proxy and call CallProxy external builtin
__ bind(&non_jsboundfunction);
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ __ LoadMap(map, target);
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsCallableBit::kMask));
__ j(zero, &non_callable);
// Call CallProxy external builtin
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
- __ j(not_equal, &non_function);
+ __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
+ __ j(not_equal, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ bind(&non_proxy);
+ __ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ j(equal, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
- __ bind(&non_function);
// Overwrite the original receiver with the (original) target.
- __ mov(args.GetReceiverOperand(), edi);
+ __ mov(args.GetReceiverOperand(), target);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(edi, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2701,8 +2700,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
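
Taken together, the rewritten stub rejects class constructors by instance type inside Generate_Call rather than via a SharedFunctionInfo flag check in Generate_CallFunction. A pseudo-C++ outline of the resulting dispatch (the real code is hand-written ia32 assembly; the names below are illustrative):

  #include <string>

  enum class Kind { kCallableJSFunction, kBoundFunction, kProxy,
                    kClassConstructor, kOtherCallable, kNonCallable };

  std::string DispatchCall(Kind kind) {
    switch (kind) {
      case Kind::kCallableJSFunction: return "jump to CallFunction";
      case Kind::kBoundFunction:      return "jump to CallBoundFunction";
      case Kind::kProxy:              return "jump to CallProxy";
      case Kind::kClassConstructor:   return "throw ConstructorNonCallableError";
      case Kind::kOtherCallable:      return "call via CALL_AS_FUNCTION_DELEGATE";
      case Kind::kNonCallable:        return "throw CalledNonCallable";
    }
    return "";
  }
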
@@ -2773,20 +2782,25 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(eax);
+ Register argc = eax;
+ Register target = edi;
+ Register map = ecx;
+ DCHECK(!AreAliased(argc, target, map));
+
+ StackArgumentsAccessor args(argc);
// Check if target is a Smi.
Label non_constructor, non_proxy, non_jsfunction, non_jsboundfunction;
- __ JumpIfSmi(edi, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ __ mov(map, FieldOperand(target, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
// Dispatch based on instance type.
- __ CmpInstanceTypeRange(ecx, ecx, FIRST_JS_FUNCTION_TYPE,
+ __ CmpInstanceTypeRange(map, map, map, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ j(above, &non_jsfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
@@ -2795,15 +2809,15 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Only dispatch to bound functions after checking whether they are
// constructors.
__ bind(&non_jsfunction);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ mov(map, FieldOperand(target, HeapObject::kMapOffset));
+ __ CmpInstanceType(map, JS_BOUND_FUNCTION_TYPE);
__ j(not_equal, &non_jsboundfunction);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET);
// Only dispatch to proxies after checking whether they are constructors.
__ bind(&non_jsboundfunction);
- __ CmpInstanceType(ecx, JS_PROXY_TYPE);
+ __ CmpInstanceType(map, JS_PROXY_TYPE);
__ j(not_equal, &non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2812,9 +2826,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ mov(args.GetReceiverOperand(), edi);
+ __ mov(args.GetReceiverOperand(), target);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(edi, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/builtins/ic-callable.tq b/deps/v8/src/builtins/ic-callable.tq
index dd29e8bf5e..4e8c9691fa 100644
--- a/deps/v8/src/builtins/ic-callable.tq
+++ b/deps/v8/src/builtins/ic-callable.tq
@@ -21,7 +21,7 @@ macro InSameNativeContext(lhs: Context, rhs: Context): bool {
macro MaybeObjectToStrong(maybeObject: MaybeObject):
HeapObject labels IfCleared {
- assert(IsWeakOrCleared(maybeObject));
+ dcheck(IsWeakOrCleared(maybeObject));
const weakObject = %RawDownCast<Weak<HeapObject>>(maybeObject);
return WeakToStrong(weakObject) otherwise IfCleared;
}
@@ -91,10 +91,10 @@ macro SetCallFeedbackContent(implicit context: Context)(
macro CollectCallFeedback(
maybeTarget: JSAny, maybeReceiver: Lazy<JSAny>, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
const feedbackVector =
@@ -158,7 +158,7 @@ macro CollectCallFeedback(
SetCallFeedbackContent(
feedbackVector, slotId, CallFeedbackContent::kReceiver);
} else {
- assert(!FeedbackValueIsReceiver(feedbackVector, slotId));
+ dcheck(!FeedbackValueIsReceiver(feedbackVector, slotId));
}
TryInitializeAsMonomorphic(recordedFunction, feedbackVector, slotId)
otherwise TransitionToMegamorphic;
@@ -170,10 +170,10 @@ macro CollectCallFeedback(
macro CollectInstanceOfFeedback(
maybeTarget: JSAny, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
const feedbackVector =
@@ -228,10 +228,10 @@ macro CollectConstructFeedback(implicit context: Context)(
updateFeedbackMode: constexpr UpdateFeedbackMode):
never labels ConstructGeneric,
ConstructArray(AllocationSite) {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
diff --git a/deps/v8/src/builtins/ic-dynamic-check-maps.tq b/deps/v8/src/builtins/ic-dynamic-check-maps.tq
index 691f793b56..3e194116fd 100644
--- a/deps/v8/src/builtins/ic-dynamic-check-maps.tq
+++ b/deps/v8/src/builtins/ic-dynamic-check-maps.tq
@@ -22,7 +22,7 @@ macro PerformPolymorphicCheck(
const polymorphicArray = UnsafeCast<WeakFixedArray>(expectedPolymorphicArray);
const weakActualMap = MakeWeak(actualMap);
const length = polymorphicArray.length_intptr;
- assert(length > 0);
+ dcheck(length > 0);
for (let mapIndex: intptr = 0; mapIndex < length;
mapIndex += FeedbackIteratorEntrySize()) {
@@ -30,7 +30,7 @@ macro PerformPolymorphicCheck(
UnsafeCast<WeakHeapObject>(polymorphicArray[mapIndex]);
if (maybeCachedMap == weakActualMap) {
const handlerIndex = mapIndex + FeedbackIteratorHandlerOffset();
- assert(handlerIndex < length);
+ dcheck(handlerIndex < length);
const maybeHandler =
Cast<Object>(polymorphicArray[handlerIndex]) otherwise unreachable;
if (TaggedEqual(maybeHandler, actualHandler)) {
@@ -49,7 +49,7 @@ macro PerformMonomorphicCheck(
actualMap: Map, actualHandler: Smi|DataHandler): int32 {
if (TaggedEqual(expectedMap, actualMap)) {
const handlerIndex = slotIndex + 1;
- assert(handlerIndex < feedbackVector.length_intptr);
+ dcheck(handlerIndex < feedbackVector.length_intptr);
const maybeHandler =
Cast<Object>(feedbackVector[handlerIndex]) otherwise unreachable;
if (TaggedEqual(actualHandler, maybeHandler)) {
diff --git a/deps/v8/src/builtins/ic.tq b/deps/v8/src/builtins/ic.tq
index a9e92cf63e..110ed88503 100644
--- a/deps/v8/src/builtins/ic.tq
+++ b/deps/v8/src/builtins/ic.tq
@@ -62,7 +62,8 @@ extern macro StoreFeedbackVectorSlot(
constexpr int32): void;
extern macro StoreWeakReferenceInFeedbackVector(
FeedbackVector, uintptr, HeapObject): MaybeObject;
-extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string);
+extern macro ReportFeedbackUpdate(
+ FeedbackVector, uintptr, constexpr string): void;
extern operator '.length_intptr' macro LoadFeedbackVectorLength(FeedbackVector):
intptr;
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index 07bfc40d8f..ec5026861e 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -17,8 +17,8 @@ macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
}
macro IncrementBlockCount(implicit context: Context)(
- coverageInfo: CoverageInfo, slot: Smi) {
- assert(Convert<int32>(slot) < coverageInfo.slot_count);
+ coverageInfo: CoverageInfo, slot: Smi): void {
+ dcheck(Convert<int32>(slot) < coverageInfo.slot_count);
++coverageInfo.slots[slot].block_count;
}
diff --git a/deps/v8/src/builtins/internal.tq b/deps/v8/src/builtins/internal.tq
index d0863f13a0..adf513edf4 100644
--- a/deps/v8/src/builtins/internal.tq
+++ b/deps/v8/src/builtins/internal.tq
@@ -19,10 +19,10 @@ builtin GetTemplateObject(
// handler; the current advantage of the split implementation is that the
// bytecode can skip most work if feedback exists.
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
try {
@@ -52,14 +52,14 @@ extern transitioning builtin ForInFilter(implicit context: Context)(
extern enum ForInFeedback extends uint31 { kAny, ...}
extern macro UpdateFeedback(
SmiTagged<ForInFeedback>, Undefined | FeedbackVector, uintptr,
- constexpr UpdateFeedbackMode);
+ constexpr UpdateFeedbackMode): void;
@export
transitioning macro ForInNextSlow(
context: Context, slot: uintptr, receiver: JSAnyNotSmi, key: JSAny,
cacheType: Object, maybeFeedbackVector: Undefined|FeedbackVector,
guaranteedFeedback: constexpr UpdateFeedbackMode): JSAny {
- assert(receiver.map != cacheType); // Handled on the fast path.
+ dcheck(receiver.map != cacheType); // Handled on the fast path.
UpdateFeedback(
SmiTag<ForInFeedback>(ForInFeedback::kAny), maybeFeedbackVector, slot,
guaranteedFeedback);
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index c2652e7eb0..0511c0aa69 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -52,10 +52,10 @@ transitioning builtin GetIteratorWithFeedback(
context: Context, receiver: JSAny, loadSlot: TaggedIndex,
callSlot: TaggedIndex,
maybeFeedbackVector: Undefined|FeedbackVector): JSAny {
- // TODO(v8:9891): Remove this assert once all callers are ported to Torque.
- // This assert ensures correctness of maybeFeedbackVector's type which can
+ // TODO(v8:9891): Remove this dcheck once all callers are ported to Torque.
+ // This dcheck ensures correctness of maybeFeedbackVector's type which can
// be easily broken for calls from CSA.
- assert(
+ dcheck(
IsUndefined(maybeFeedbackVector) ||
Is<FeedbackVector>(maybeFeedbackVector));
let iteratorMethod: JSAny;
@@ -117,7 +117,7 @@ transitioning builtin CallIteratorWithFeedback(
// https://tc39.es/ecma262/#sec-iteratorclose
@export
transitioning macro IteratorCloseOnException(implicit context: Context)(
- iterator: IteratorRecord) {
+ iterator: IteratorRecord): void {
try {
// 4. Let innerResult be GetMethod(iterator, "return").
const method = GetProperty(iterator.object, kReturnString);
diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq
index fbcf35fedc..b3d2aafb56 100644
--- a/deps/v8/src/builtins/math.tq
+++ b/deps/v8/src/builtins/math.tq
@@ -416,7 +416,7 @@ MathHypot(
} else if (max == 0) {
return 0;
}
- assert(max > 0);
+ dcheck(max > 0);
// Kahan summation to avoid rounding errors.
// Normalize the numbers to the largest one to avoid overflow.
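[editorial note] The comments kept in this MathHypot hunk name the two numerical tricks involved: normalize by the largest magnitude to avoid overflow, then use Kahan (compensated) summation to limit rounding error. A standalone C++ sketch of that approach, assuming only what the comments above state and not mirroring the Torque code line for line:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

double HypotSketch(const std::vector<double>& values) {
  double max = 0;
  for (double v : values) max = std::max(max, std::fabs(v));
  if (max == 0) return 0;
  double sum = 0, compensation = 0;  // Kahan running sum and error term
  for (double v : values) {
    double n = v / max;              // normalized term, magnitude at most 1
    double term = n * n - compensation;
    double next = sum + term;
    compensation = (next - sum) - term;  // recover the rounding error
    sum = next;
  }
  return std::sqrt(sum) * max;       // undo the normalization
}

int main() {
  std::printf("%f\n", HypotSketch({3.0, 4.0}));  // prints 5.000000
  return 0;
}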
diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq
index f4bd4cc578..777dd210d6 100644
--- a/deps/v8/src/builtins/number.tq
+++ b/deps/v8/src/builtins/number.tq
@@ -62,7 +62,7 @@ transitioning macro ThisNumberValue(implicit context: Context)(
}
macro ToCharCode(input: int32): char8 {
- assert(0 <= input && input < 36);
+ dcheck(0 <= input && input < 36);
return input < 10 ?
%RawDownCast<char8>(Unsigned(input + kAsciiZero)) :
%RawDownCast<char8>(Unsigned(input - 10 + kAsciiLowerCaseA));
@@ -78,7 +78,7 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
return StringFromSingleCharCode(ToCharCode(n));
}
} else {
- assert(isNegative);
+ dcheck(isNegative);
if (n == kMinInt32) {
goto Slow;
}
@@ -92,7 +92,7 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
temp = temp / radix;
length = length + 1;
}
- assert(length > 0);
+ dcheck(length > 0);
const strSeq = AllocateNonEmptySeqOneByteString(Unsigned(length));
let cursor: intptr = Convert<intptr>(length) - 1;
while (n > 0) {
@@ -102,15 +102,15 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow {
cursor = cursor - 1;
}
if (isNegative) {
- assert(cursor == 0);
+ dcheck(cursor == 0);
// Insert '-' to result.
*UnsafeConstCast(&strSeq.chars[0]) = 45;
} else {
- assert(cursor == -1);
+ dcheck(cursor == -1);
// In sync with Factory::SmiToString: If radix = 10 and positive number,
// update hash for string.
if (radix == 10) {
- assert(strSeq.raw_hash_field == kNameEmptyHashField);
+ dcheck(strSeq.raw_hash_field == kNameEmptyHashField);
strSeq.raw_hash_field = MakeArrayIndexHash(Unsigned(x), Unsigned(length));
}
}
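[editorial note] The NumberToStringSmi hunks above show the digit-extraction scheme: pick the low digit with n % radix, map it through ToCharCode (0-9 then a-z), divide by the radix, fill the buffer from the end, and prepend '-' for negative inputs. A hedged C++ sketch of the same algorithm (helper names are illustrative; the real code writes into a preallocated sequential string and bails to a slow path for kMinInt32):

#include <cstdio>
#include <string>

char ToCharCodeSketch(int digit) {  // assumes 0 <= digit < 36
  return digit < 10 ? static_cast<char>('0' + digit)
                    : static_cast<char>('a' + digit - 10);
}

std::string SmiToStringSketch(int x, int radix) {  // assumes 2 <= radix <= 36
  bool negative = x < 0;
  long long n = negative ? -static_cast<long long>(x) : x;  // safe for INT_MIN
  if (n == 0) return "0";
  std::string out;
  while (n > 0) {
    out.insert(out.begin(), ToCharCodeSketch(static_cast<int>(n % radix)));
    n /= radix;
  }
  if (negative) out.insert(out.begin(), '-');
  return out;
}

int main() {
  std::printf("%s\n", SmiToStringSketch(-255, 16).c_str());  // prints -ff
  return 0;
}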
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index 81a0859d29..34ab73148f 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -58,7 +58,7 @@ ObjectFromEntries(
const fastIteratorResultMap: Map = GetIteratorResultMap();
let i: iterator::IteratorRecord = iterator::GetIterator(iterable);
try {
- assert(!IsNullOrUndefined(i.object));
+ dcheck(!IsNullOrUndefined(i.object));
while (true) {
const step: JSReceiver =
iterator::IteratorStep(i, fastIteratorResultMap)
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 4c2533e68d..4ea4332e19 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -2047,14 +2047,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertFunction(r4);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
- Label class_constructor;
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
- __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
- __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
- __ bne(&class_constructor, cr0);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2063,6 +2057,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
r0);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ andi(r0, r6,
Operand(SharedFunctionInfo::IsStrictBit::kMask |
SharedFunctionInfo::IsNativeBit::kMask));
@@ -2131,14 +2126,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
- __ push(r4);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2244,34 +2231,48 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(r4, &non_callable);
- __ bind(&non_smi);
- __ LoadMap(r7, r4);
- __ CompareInstanceTypeRange(r7, r8, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = r3;
+ Register target = r4;
+ Register map = r7;
+ Register instance_type = r8;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, le);
- __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::Bits1::IsCallableBit::kShift, r0);
- __ beq(&non_callable, cr0);
+ {
+ Register flags = r7;
+ __ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ TestBit(flags, Map::Bits1::IsCallableBit::kShift, r0);
+ __ beq(&non_callable, cr0);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ cmpi(r8, Operand(JS_PROXY_TYPE));
+ __ cmpi(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ cmpi(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ beq(&class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver the (original) target.
- __ StoreReceiver(r4, r3, r8);
+ __ StoreReceiver(target, argc, r8);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(r4, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2280,9 +2281,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
}
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
+ }
}
// static
@@ -2352,32 +2362,41 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = r3;
+ Register target = r4;
+ Register map = r7;
+ Register instance_type = r8;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(r4, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(r7, FieldMemOperand(r4, HeapObject::kMapOffset),
- r0);
- __ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r5, Map::Bits1::IsConstructorBit::kShift, r0);
- __ beq(&non_constructor, cr0);
+ __ LoadTaggedPointerField(
+ map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
+ {
+ Register flags = r5;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestBit(flags, Map::Bits1::IsConstructorBit::kShift, r0);
+ __ beq(&non_constructor, cr0);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(r7, r8, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, le);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ cmpi(r8, Operand(JS_PROXY_TYPE));
+ __ cmpi(instance_type, Operand(JS_PROXY_TYPE));
__ bne(&non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2386,9 +2405,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ StoreReceiver(r4, r3, r8);
+ __ StoreReceiver(target, argc, r8);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(r4, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
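[editorial note] The PPC change above moves the "classConstructor" rejection out of Generate_CallFunction and into Generate_Call, where dispatch is driven by the map's instance type: callable JS functions occupy one contiguous type range, and class constructors are a distinct type routed straight to a throwing path. The following C++ sketch only illustrates that dispatch order; the type values and the is_callable flag are hypothetical stand-ins, not V8's real instance-type numbering:

#include <cstdio>

enum InstanceType : int {  // hypothetical values for illustration only
  FIRST_CALLABLE_JS_FUNCTION_TYPE = 100,
  LAST_CALLABLE_JS_FUNCTION_TYPE = 110,
  JS_CLASS_CONSTRUCTOR_TYPE = 111,
  JS_BOUND_FUNCTION_TYPE = 112,
  JS_PROXY_TYPE = 113,
};

const char* DispatchCall(int instance_type, bool is_callable) {
  if (instance_type >= FIRST_CALLABLE_JS_FUNCTION_TYPE &&
      instance_type <= LAST_CALLABLE_JS_FUNCTION_TYPE)
    return "CallFunction";                         // ordinary callable function
  if (instance_type == JS_BOUND_FUNCTION_TYPE) return "CallBoundFunction";
  if (!is_callable) return "ThrowCalledNonCallable";
  if (instance_type == JS_PROXY_TYPE) return "CallProxy";
  if (instance_type == JS_CLASS_CONSTRUCTOR_TYPE)
    return "ThrowConstructorNonCallableError";     // callable, but must not be [[Call]]ed
  return "CallFunction via call_as_function_delegate";
}

int main() {
  std::printf("%s\n", DispatchCall(JS_CLASS_CONSTRUCTOR_TYPE, true));
  return 0;
}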
diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq
index 0e435afad9..5c871d3ff0 100644
--- a/deps/v8/src/builtins/promise-abstract-operations.tq
+++ b/deps/v8/src/builtins/promise-abstract-operations.tq
@@ -194,7 +194,7 @@ transitioning builtin
FulfillPromise(implicit context: Context)(
promise: JSPromise, value: JSAny): Undefined {
// Assert: The value of promise.[[PromiseState]] is "pending".
- assert(promise.Status() == PromiseState::kPending);
+ dcheck(promise.Status() == PromiseState::kPending);
RunContextPromiseHookResolve(promise);
@@ -469,7 +469,7 @@ transitioning macro PerformPromiseThenImpl(implicit context: Context)(
resultPromiseOrCapability);
} else
deferred {
- assert(promise.Status() == PromiseState::kRejected);
+ dcheck(promise.Status() == PromiseState::kRejected);
handlerContext = ExtractHandlerContext(onRejected, onFulfilled);
microtask = NewPromiseRejectReactionJobTask(
handlerContext, reactionsOrResult, onRejected,
diff --git a/deps/v8/src/builtins/promise-all-element-closure.tq b/deps/v8/src/builtins/promise-all-element-closure.tq
index 16e91dae06..24b9cfb346 100644
--- a/deps/v8/src/builtins/promise-all-element-closure.tq
+++ b/deps/v8/src/builtins/promise-all-element-closure.tq
@@ -103,7 +103,7 @@ transitioning macro PromiseAllResolveElementClosure<F: type>(
}
}
- assert(
+ dcheck(
promiseContext.length ==
SmiTag(PromiseAllResolveElementContextSlots::
kPromiseAllResolveElementLength));
@@ -111,10 +111,10 @@ transitioning macro PromiseAllResolveElementClosure<F: type>(
function.context = nativeContext;
// Determine the index from the {function}.
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
const identityHash =
LoadJSReceiverIdentityHash(function) otherwise unreachable;
- assert(identityHash > 0);
+ dcheck(identityHash > 0);
const index = identityHash - 1;
let remainingElementsCount = *ContextSlot(
diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq
index 5ab64a167d..602908d7f6 100644
--- a/deps/v8/src/builtins/promise-all.tq
+++ b/deps/v8/src/builtins/promise-all.tq
@@ -44,15 +44,15 @@ macro CreatePromiseAllResolveElementFunction(implicit context: Context)(
resolveElementContext: PromiseAllResolveElementContext, index: Smi,
nativeContext: NativeContext,
resolveFunction: SharedFunctionInfo): JSFunction {
- assert(index > 0);
- assert(index < kPropertyArrayHashFieldMax);
+ dcheck(index > 0);
+ dcheck(index < kPropertyArrayHashFieldMax);
const map = *ContextSlot(
nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
const resolve = AllocateFunctionWithMapAndContext(
map, resolveFunction, resolveElementContext);
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
resolve.properties_or_hash = index;
return resolve;
}
@@ -332,7 +332,7 @@ transitioning macro GeneratePromiseAll<F1: type, F2: type>(
const capability = NewPromiseCapability(receiver, False);
// NewPromiseCapability guarantees that receiver is Constructor.
- assert(Is<Constructor>(receiver));
+ dcheck(Is<Constructor>(receiver));
const constructor = UnsafeCast<Constructor>(receiver);
try {
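[editorial note] The dchecks in the two promise-all files above document an index encoding: the element closure's position is stashed in its properties_or_hash field as a strictly positive value (dcheck(index > 0)), so it can never collide with kPropertyArrayNoHashSentinel == 0, and the closure recovers the zero-based slot as identityHash - 1. A small standalone model of that encoding, with the struct and helper names invented for illustration:

#include <cassert>
#include <cstdio>

constexpr int kNoHashSentinel = 0;  // mirrors kPropertyArrayNoHashSentinel == 0

struct ResolveElementClosure {
  int properties_or_hash = kNoHashSentinel;  // doubles as the stored index
};

void StoreOneBasedIndex(ResolveElementClosure& f, int one_based_index) {
  assert(one_based_index > 0);             // dcheck(index > 0)
  f.properties_or_hash = one_based_index;  // never equals the sentinel
}

int LoadZeroBasedSlot(const ResolveElementClosure& f) {
  assert(f.properties_or_hash > kNoHashSentinel);  // dcheck(identityHash > 0)
  return f.properties_or_hash - 1;                 // index = identityHash - 1
}

int main() {
  ResolveElementClosure f;
  StoreOneBasedIndex(f, 1);
  std::printf("%d\n", LoadZeroBasedSlot(f));  // prints 0
  return 0;
}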
diff --git a/deps/v8/src/builtins/promise-any.tq b/deps/v8/src/builtins/promise-any.tq
index d86e265d6c..1555511eda 100644
--- a/deps/v8/src/builtins/promise-any.tq
+++ b/deps/v8/src/builtins/promise-any.tq
@@ -57,14 +57,14 @@ transitioning macro CreatePromiseAnyRejectElementContext(
macro CreatePromiseAnyRejectElementFunction(implicit context: Context)(
rejectElementContext: PromiseAnyRejectElementContext, index: Smi,
nativeContext: NativeContext): JSFunction {
- assert(index > 0);
- assert(index < kPropertyArrayHashFieldMax);
+ dcheck(index > 0);
+ dcheck(index < kPropertyArrayHashFieldMax);
const map = *ContextSlot(
nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
const rejectInfo = PromiseAnyRejectElementSharedFunConstant();
const reject =
AllocateFunctionWithMapAndContext(map, rejectInfo, rejectElementContext);
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
reject.properties_or_hash = index;
return reject;
}
@@ -89,7 +89,7 @@ PromiseAnyRejectElementClosure(
return Undefined;
}
- assert(
+ dcheck(
context.length ==
SmiTag(
PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength));
@@ -100,9 +100,9 @@ PromiseAnyRejectElementClosure(
target.context = nativeContext;
// 5. Let index be F.[[Index]].
- assert(kPropertyArrayNoHashSentinel == 0);
+ dcheck(kPropertyArrayNoHashSentinel == 0);
const identityHash = LoadJSReceiverIdentityHash(target) otherwise unreachable;
- assert(identityHash > 0);
+ dcheck(identityHash > 0);
const index = identityHash - 1;
// 6. Let errors be F.[[Errors]].
@@ -328,7 +328,7 @@ PromiseAny(
const capability = NewPromiseCapability(receiver, False);
// NewPromiseCapability guarantees that receiver is Constructor.
- assert(Is<Constructor>(receiver));
+ dcheck(Is<Constructor>(receiver));
const constructor = UnsafeCast<Constructor>(receiver);
try {
@@ -365,7 +365,7 @@ PromiseAny(
goto Reject(e);
} label Reject(e: Object) deferred {
// Exception must be bound to a JS value.
- assert(e != TheHole);
+ dcheck(e != TheHole);
Call(
context, UnsafeCast<Callable>(capability.reject), Undefined,
UnsafeCast<JSAny>(e));
diff --git a/deps/v8/src/builtins/promise-finally.tq b/deps/v8/src/builtins/promise-finally.tq
index f576486850..ff979f9732 100644
--- a/deps/v8/src/builtins/promise-finally.tq
+++ b/deps/v8/src/builtins/promise-finally.tq
@@ -70,7 +70,7 @@ PromiseCatchFinally(
*ContextSlot(context, PromiseFinallyContextSlot::kConstructorSlot);
// 5. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
// 6. Let promise be ? PromiseResolve(C, result).
const promise = PromiseResolve(constructor, result);
@@ -117,7 +117,7 @@ PromiseThenFinally(
*ContextSlot(context, PromiseFinallyContextSlot::kConstructorSlot);
// 5. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
// 6. Let promise be ? PromiseResolve(C, result).
const promise = PromiseResolve(constructor, result);
@@ -185,7 +185,7 @@ PromisePrototypeFinally(
}
// 4. Assert: IsConstructor(C) is true.
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
// 5. If IsCallable(onFinally) is not true,
// a. Let thenFinally be onFinally.
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index 58a4ad3c0d..e8b4842dd5 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -49,7 +49,7 @@ macro PromiseInit(promise: JSPromise): void {
macro InnerNewJSPromise(implicit context: Context)(): JSPromise {
const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX);
- assert(IsFunctionWithPrototypeSlotMap(promiseFun.map));
+ dcheck(IsFunctionWithPrototypeSlotMap(promiseFun.map));
const promiseMap = UnsafeCast<Map>(promiseFun.prototype_or_initial_map);
const promiseHeapObject = promise_internal::AllocateJSPromise(context);
*UnsafeConstCast(&promiseHeapObject.map) = promiseMap;
@@ -103,7 +103,7 @@ macro NewPromiseRejectReactionJobTask(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookInit(implicit context: Context)(
- promise: JSPromise, parent: Object) {
+ promise: JSPromise, parent: Object): void {
const maybeHook = *NativeContextSlot(
ContextSlot::PROMISE_HOOK_INIT_FUNCTION_INDEX);
const hook = Cast<Callable>(maybeHook) otherwise return;
@@ -119,7 +119,7 @@ transitioning macro RunContextPromiseHookInit(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
- promise: JSPromise) {
+ promise: JSPromise): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise,
PromiseHookFlags());
@@ -127,14 +127,14 @@ transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
- promise: JSPromise, flags: uint32) {
+ promise: JSPromise, flags: uint32): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise, flags);
}
@export
transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
PromiseHookFlags());
@@ -142,7 +142,8 @@ transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32):
+ void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
flags);
@@ -150,7 +151,7 @@ transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined): void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
PromiseHookFlags());
@@ -158,7 +159,8 @@ transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
@export
transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32):
+ void {
RunContextPromiseHook(
ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
flags);
@@ -166,7 +168,8 @@ transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
transitioning macro RunContextPromiseHook(implicit context: Context)(
slot: Slot<NativeContext, Undefined|Callable>,
- promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined,
+ flags: uint32): void {
if (!IsContextPromiseHookEnabled(flags)) return;
const maybeHook = *NativeContextSlot(slot);
const hook = Cast<Callable>(maybeHook) otherwise return;
@@ -192,7 +195,7 @@ transitioning macro RunContextPromiseHook(implicit context: Context)(
}
transitioning macro RunAnyPromiseHookInit(implicit context: Context)(
- promise: JSPromise, parent: Object) {
+ promise: JSPromise, parent: Object): void {
const promiseHookFlags = PromiseHookFlags();
// Fast return if no hooks are set.
if (promiseHookFlags == 0) return;
@@ -230,7 +233,7 @@ transitioning macro NewJSPromise(implicit context: Context)(): JSPromise {
@export
transitioning macro NewJSPromise(implicit context: Context)(
status: constexpr PromiseState, result: JSAny): JSPromise {
- assert(status != PromiseState::kPending);
+ dcheck(status != PromiseState::kPending);
const instance = InnerNewJSPromise();
instance.reactions_or_result = result;
diff --git a/deps/v8/src/builtins/promise-race.tq b/deps/v8/src/builtins/promise-race.tq
index 973ddd8bac..eed1fae389 100644
--- a/deps/v8/src/builtins/promise-race.tq
+++ b/deps/v8/src/builtins/promise-race.tq
@@ -27,7 +27,7 @@ PromiseRace(
const promise = capability.promise;
// NewPromiseCapability guarantees that receiver is Constructor.
- assert(Is<Constructor>(receiver));
+ dcheck(Is<Constructor>(receiver));
const constructor = UnsafeCast<Constructor>(receiver);
// For catch prediction, don't treat the .then calls as handling it;
diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq
index fa3d19411f..5b0a82ca3d 100644
--- a/deps/v8/src/builtins/promise-resolve.tq
+++ b/deps/v8/src/builtins/promise-resolve.tq
@@ -138,8 +138,8 @@ ResolvePromise(implicit context: Context)(
// ensures that the intrinsic %ObjectPrototype% doesn't contain any
// "then" property. This helps to avoid negative lookups on iterator
// results from async generators.
- assert(IsJSReceiverMap(resolutionMap));
- assert(!IsPromiseThenProtectorCellInvalid());
+ dcheck(IsJSReceiverMap(resolutionMap));
+ dcheck(!IsPromiseThenProtectorCellInvalid());
if (resolutionMap ==
*NativeContextSlot(
nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq
index a5925c2f7d..330cf8e0cd 100644
--- a/deps/v8/src/builtins/proxy-delete-property.tq
+++ b/deps/v8/src/builtins/proxy-delete-property.tq
@@ -15,15 +15,15 @@ ProxyDeleteProperty(implicit context: Context)(
// Handle deeply nested proxy.
PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(Is<Name>(name));
- assert(!IsPrivateSymbol(name));
+ dcheck(TaggedIsNotSmi(name));
+ dcheck(Is<Name>(name));
+ dcheck(!IsPrivateSymbol(name));
try {
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq
index 563b38be37..0471cf318a 100644
--- a/deps/v8/src/builtins/proxy-get-property.tq
+++ b/deps/v8/src/builtins/proxy-get-property.tq
@@ -17,9 +17,9 @@ ProxyGetProperty(implicit context: Context)(
onNonExistent: Smi): JSAny {
PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(Is<Name>(name));
- assert(!IsPrivateSymbol(name));
+ dcheck(TaggedIsNotSmi(name));
+ dcheck(Is<Name>(name));
+ dcheck(!IsPrivateSymbol(name));
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
diff --git a/deps/v8/src/builtins/proxy-get-prototype-of.tq b/deps/v8/src/builtins/proxy-get-prototype-of.tq
index 152489ecb6..ad22ab2986 100644
--- a/deps/v8/src/builtins/proxy-get-prototype-of.tq
+++ b/deps/v8/src/builtins/proxy-get-prototype-of.tq
@@ -16,7 +16,7 @@ ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): JSAny {
// 1. Let handler be O.[[ProxyHandler]].
// 2. If handler is null, throw a TypeError exception.
// 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
@@ -40,7 +40,7 @@ ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): JSAny {
// 9. Let extensibleTarget be ? IsExtensible(target).
// 10. If extensibleTarget is true, return handlerProto.
const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
+ dcheck(extensibleTarget == True || extensibleTarget == False);
if (extensibleTarget == True) {
return handlerProto;
}
diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq
index fc81d5dcc9..75ac60d03c 100644
--- a/deps/v8/src/builtins/proxy-has-property.tq
+++ b/deps/v8/src/builtins/proxy-has-property.tq
@@ -10,19 +10,19 @@ namespace proxy {
// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
transitioning builtin ProxyHasProperty(implicit context: Context)(
proxy: JSProxy, name: PropertyKey): JSAny {
- assert(Is<JSProxy>(proxy));
+ dcheck(Is<JSProxy>(proxy));
PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
- assert(Is<Name>(name));
- assert(!IsPrivateSymbol(name));
+ dcheck(Is<Name>(name));
+ dcheck(!IsPrivateSymbol(name));
try {
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/deps/v8/src/builtins/proxy-is-extensible.tq b/deps/v8/src/builtins/proxy-is-extensible.tq
index a7c2c56d44..58f147c296 100644
--- a/deps/v8/src/builtins/proxy-is-extensible.tq
+++ b/deps/v8/src/builtins/proxy-is-extensible.tq
@@ -16,7 +16,7 @@ transitioning builtin ProxyIsExtensible(implicit context: Context)(
// 1. Let handler be O.[[ProxyHandler]].
// 2. If handler is null, throw a TypeError exception.
// 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/deps/v8/src/builtins/proxy-prevent-extensions.tq b/deps/v8/src/builtins/proxy-prevent-extensions.tq
index a5a3d93da4..9f7a226b3a 100644
--- a/deps/v8/src/builtins/proxy-prevent-extensions.tq
+++ b/deps/v8/src/builtins/proxy-prevent-extensions.tq
@@ -17,7 +17,7 @@ ProxyPreventExtensions(implicit context: Context)(
// 1. Let handler be O.[[ProxyHandler]].
// 2. If handler is null, throw a TypeError exception.
// 3. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
@@ -38,7 +38,7 @@ ProxyPreventExtensions(implicit context: Context)(
// 8.b If extensibleTarget is true, throw a TypeError exception.
if (ToBoolean(trapResult)) {
const extensibleTarget: JSAny = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
+ dcheck(extensibleTarget == True || extensibleTarget == False);
if (extensibleTarget == True) {
ThrowTypeError(MessageTemplate::kProxyPreventExtensionsExtensible);
}
diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq
index d031bb9f1d..0c6c9dbb25 100644
--- a/deps/v8/src/builtins/proxy-revoke.tq
+++ b/deps/v8/src/builtins/proxy-revoke.tq
@@ -26,7 +26,7 @@ ProxyRevoke(js-implicit context: Context)(): Undefined {
*proxySlot = Null;
// 4. Assert: p is a Proxy object.
- assert(Is<JSProxy>(proxy));
+ dcheck(Is<JSProxy>(proxy));
// 5. Set p.[[ProxyTarget]] to null.
proxy.target = Null;
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
index 441a5d418d..8a7dfde9e5 100644
--- a/deps/v8/src/builtins/proxy-set-property.tq
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -11,7 +11,7 @@ SetPropertyWithReceiver(implicit context: Context)(
Object, Name, Object, Object): void;
transitioning macro CallThrowTypeErrorIfStrict(implicit context: Context)(
- message: constexpr MessageTemplate) {
+ message: constexpr MessageTemplate): void {
ThrowTypeErrorIfStrict(SmiConstant(message), Null, Null);
}
@@ -22,8 +22,8 @@ ProxySetProperty(implicit context: Context)(
proxy: JSProxy, name: PropertyKey|PrivateSymbol, value: JSAny,
receiverValue: JSAny): JSAny {
// 1. Assert: IsPropertyKey(P) is true.
- assert(TaggedIsNotSmi(name));
- assert(Is<Name>(name));
+ dcheck(TaggedIsNotSmi(name));
+ dcheck(Is<Name>(name));
let key: PropertyKey;
typeswitch (name) {
@@ -40,7 +40,7 @@ ProxySetProperty(implicit context: Context)(
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
diff --git a/deps/v8/src/builtins/proxy-set-prototype-of.tq b/deps/v8/src/builtins/proxy-set-prototype-of.tq
index ec68cef44c..57ceb27784 100644
--- a/deps/v8/src/builtins/proxy-set-prototype-of.tq
+++ b/deps/v8/src/builtins/proxy-set-prototype-of.tq
@@ -15,12 +15,12 @@ ProxySetPrototypeOf(implicit context: Context)(
const kTrapName: constexpr string = 'setPrototypeOf';
try {
// 1. Assert: Either Type(V) is Object or Type(V) is Null.
- assert(proto == Null || Is<JSReceiver>(proto));
+ dcheck(proto == Null || Is<JSReceiver>(proto));
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ dcheck(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
@@ -48,7 +48,7 @@ ProxySetPrototypeOf(implicit context: Context)(
// 10. Let extensibleTarget be ? IsExtensible(target).
// 11. If extensibleTarget is true, return true.
const extensibleTarget: Object = object::ObjectIsExtensibleImpl(target);
- assert(extensibleTarget == True || extensibleTarget == False);
+ dcheck(extensibleTarget == True || extensibleTarget == False);
if (extensibleTarget == True) {
return True;
}
diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq
index e80ed36192..9e56a28903 100644
--- a/deps/v8/src/builtins/proxy.tq
+++ b/deps/v8/src/builtins/proxy.tq
@@ -11,13 +11,13 @@ extern macro ProxiesCodeStubAssembler::AllocateProxy(implicit context: Context)(
extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
implicit context: Context)(
- JSReceiver, JSProxy, Name, Object, constexpr int31);
+ JSReceiver, JSProxy, Name, Object, constexpr int31): void;
extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult(
- implicit context: Context)(JSReceiver, JSProxy, Name);
+ implicit context: Context)(JSReceiver, JSProxy, Name): void;
extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
- implicit context: Context)(JSReceiver, JSProxy, Name);
+ implicit context: Context)(JSReceiver, JSProxy, Name): void;
const kProxyGet: constexpr int31
generates 'JSProxy::AccessKind::kGet';
diff --git a/deps/v8/src/builtins/regexp-match-all.tq b/deps/v8/src/builtins/regexp-match-all.tq
index 932972d844..1f9aa1819f 100644
--- a/deps/v8/src/builtins/regexp-match-all.tq
+++ b/deps/v8/src/builtins/regexp-match-all.tq
@@ -41,7 +41,7 @@ transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)(
const flags: String = FastFlagsGetter(fastRegExp);
matcher = RegExpCreate(nativeContext, source, flags);
const matcherRegExp = UnsafeCast<JSRegExp>(matcher);
- assert(IsFastRegExpPermissive(matcherRegExp));
+ dcheck(IsFastRegExpPermissive(matcherRegExp));
// 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
// 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
@@ -159,7 +159,7 @@ transitioning javascript builtin RegExpStringIteratorPrototypeNext(
return AllocateJSIteratorResult(UnsafeCast<JSAny>(match), False);
}
// a. If global is true,
- assert(flags.global);
+ dcheck(flags.global);
if (isFastRegExp) {
// i. Let matchStr be ? ToString(? Get(match, "0")).
const match = UnsafeCast<JSRegExpResult>(match);
@@ -168,7 +168,7 @@ transitioning javascript builtin RegExpStringIteratorPrototypeNext(
// When iterating_regexp is fast, we assume it stays fast even after
// accessing the first match from the RegExp result.
- assert(IsFastRegExpPermissive(iteratingRegExp));
+ dcheck(IsFastRegExpPermissive(iteratingRegExp));
const iteratingRegExp = UnsafeCast<JSRegExp>(iteratingRegExp);
if (matchStr == kEmptyString) {
// 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
@@ -186,7 +186,7 @@ transitioning javascript builtin RegExpStringIteratorPrototypeNext(
// iii. Return ! CreateIterResultObject(match, false).
return AllocateJSIteratorResult(match, False);
}
- assert(!isFastRegExp);
+ dcheck(!isFastRegExp);
// i. Let matchStr be ? ToString(? Get(match, "0")).
const match = UnsafeCast<JSAny>(match);
const matchStr = ToString_Inline(GetProperty(match, SmiConstant(0)));
diff --git a/deps/v8/src/builtins/regexp-match.tq b/deps/v8/src/builtins/regexp-match.tq
index 5fca09893c..3da132636a 100644
--- a/deps/v8/src/builtins/regexp-match.tq
+++ b/deps/v8/src/builtins/regexp-match.tq
@@ -22,7 +22,7 @@ extern macro UnsafeLoadFixedArrayElement(
transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
regexp: JSReceiver, string: String, isFastPath: constexpr bool): JSAny {
if constexpr (isFastPath) {
- assert(Is<FastJSRegExp>(regexp));
+ dcheck(Is<FastJSRegExp>(regexp));
}
const isGlobal: bool = FlagGetter(regexp, Flag::kGlobal, isFastPath);
@@ -32,7 +32,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
RegExpExec(regexp, string);
}
- assert(isGlobal);
+ dcheck(isGlobal);
const isUnicode: bool = FlagGetter(regexp, Flag::kUnicode, isFastPath);
StoreLastIndex(regexp, 0, isFastPath);
@@ -74,7 +74,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
string, UnsafeCast<Smi>(matchFrom), UnsafeCast<Smi>(matchTo));
}
} else {
- assert(!isFastPath);
+ dcheck(!isFastPath);
const resultTemp = RegExpExec(regexp, string);
if (resultTemp == Null) {
goto IfDidNotMatch;
@@ -96,7 +96,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
}
let lastIndex = LoadLastIndex(regexp, isFastPath);
if constexpr (isFastPath) {
- assert(TaggedIsPositiveSmi(lastIndex));
+ dcheck(TaggedIsPositiveSmi(lastIndex));
} else {
lastIndex = ToLength_Inline(lastIndex);
}
@@ -109,7 +109,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)(
// incremented to overflow the Smi range since the maximal string
// length is less than the maximal Smi value.
StaticAssertStringLengthFitsSmi();
- assert(TaggedIsPositiveSmi(newLastIndex));
+ dcheck(TaggedIsPositiveSmi(newLastIndex));
}
StoreLastIndex(regexp, newLastIndex, isFastPath);
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index c59a41b27f..d26f8d6949 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -22,7 +22,7 @@ StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)(
transitioning macro RegExpReplaceCallableNoExplicitCaptures(
implicit context: Context)(
matchesElements: FixedArray, matchesLength: intptr, string: String,
- replaceFn: Callable) {
+ replaceFn: Callable): void {
let matchStart: Smi = 0;
for (let i: intptr = 0; i < matchesLength; i++) {
typeswitch (matchesElements.objects[i]) {
@@ -63,7 +63,8 @@ transitioning macro RegExpReplaceCallableNoExplicitCaptures(
transitioning macro
RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
- matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) {
+ matchesElements: FixedArray, matchesLength: intptr,
+ replaceFn: Callable): void {
for (let i: intptr = 0; i < matchesLength; i++) {
const elArray =
Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
@@ -175,9 +176,9 @@ transitioning macro RegExpReplaceFastString(implicit context: Context)(
transitioning builtin RegExpReplace(implicit context: Context)(
regexp: FastJSRegExp, string: String, replaceValue: JSAny): String {
- // TODO(pwong): Remove assert when all callers (StringPrototypeReplace) are
+ // TODO(pwong): Remove dcheck when all callers (StringPrototypeReplace) are
// from Torque.
- assert(Is<FastJSRegExp>(regexp));
+ dcheck(Is<FastJSRegExp>(regexp));
// 2. Is {replace_value} callable?
typeswitch (replaceValue) {
diff --git a/deps/v8/src/builtins/regexp-search.tq b/deps/v8/src/builtins/regexp-search.tq
index b70d23a0dd..7deec8b1c6 100644
--- a/deps/v8/src/builtins/regexp-search.tq
+++ b/deps/v8/src/builtins/regexp-search.tq
@@ -9,7 +9,7 @@ namespace regexp {
transitioning macro
RegExpPrototypeSearchBodyFast(implicit context: Context)(
regexp: JSRegExp, string: String): JSAny {
- assert(IsFastRegExpPermissive(regexp));
+ dcheck(IsFastRegExpPermissive(regexp));
// Grab the initial value of last index.
const previousLastIndex: Smi = FastLoadLastIndex(regexp);
diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq
index 29fad26736..5760b06658 100644
--- a/deps/v8/src/builtins/regexp.tq
+++ b/deps/v8/src/builtins/regexp.tq
@@ -86,7 +86,7 @@ transitioning macro RegExpPrototypeExecBodyWithoutResult(
regexp: JSRegExp, string: String, regexpLastIndex: Number,
isFastPath: constexpr bool): RegExpMatchInfo labels IfDidNotMatch {
if (isFastPath) {
- assert(HasInitialRegExpMap(regexp));
+ dcheck(HasInitialRegExpMap(regexp));
} else {
IncrementUseCounter(context, SmiConstant(kRegExpExecCalledOnSlowRegExp));
}
@@ -397,7 +397,7 @@ transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool {
return Is<JSRegExp>(receiver);
}
- assert(value != Undefined);
+ dcheck(value != Undefined);
// The common path. Symbol.match exists, equals the RegExpPrototypeMatch
// function (and is thus trueish), and the receiver is a JSRegExp.
if (ToBoolean(value)) {
@@ -408,7 +408,7 @@ transitioning macro IsRegExp(implicit context: Context)(obj: JSAny): bool {
return true;
}
- assert(!ToBoolean(value));
+ dcheck(!ToBoolean(value));
if (Is<JSRegExp>(receiver)) {
IncrementUseCounter(context, SmiConstant(kRegExpMatchIsFalseishOnJSRegExp));
}
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index c90352bea1..3676ae3441 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -327,7 +327,7 @@ static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
__ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
__ DecodeField<Code::KindField>(scratch);
__ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
- Operand(static_cast<int>(CodeKind::BASELINE)));
+ Operand(static_cast<int64_t>(CodeKind::BASELINE)));
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
@@ -1023,7 +1023,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ Branch(if_return, eq, bytecode, \
- Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ Operand(static_cast<int64_t>(interpreter::Bytecode::k##NAME)));
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
@@ -1031,7 +1031,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
// of the loop.
Label end, not_jump_loop;
__ Branch(&not_jump_loop, ne, bytecode,
- Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)),
+ Operand(static_cast<int64_t>(interpreter::Bytecode::kJumpLoop)),
Label::Distance::kNear);
// We need to restore the original bytecode_offset since we might have
// increased it to skip the wide / extra-wide prefix bytecode.
@@ -3479,7 +3479,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ JumpIfSmi(a1, &context_check);
__ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ __ li(a1, Operand(static_cast<int64_t>(deopt_kind)));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
// a4: already has fp-to-sp delta.
@@ -3851,7 +3851,7 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(
Label deopt, bailout;
__ Branch(&deopt, ne, a0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kSuccess)),
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)),
Label::Distance::kNear);
__ MaybeRestoreRegisters(registers);
@@ -3860,11 +3860,11 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(
__ bind(&deopt);
__ Branch(&bailout, eq, a0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
- Operand(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
}
__ MaybeRestoreRegisters(registers);
__ LeaveFrame(StackFrame::INTERNAL);
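[editorial note] The RISC-V hunks above consistently replace static_cast<int> with static_cast<int64_t> when wrapping enum values in Operand. A plausible reading is that on a 64-bit target the immediate is naturally 64 bits wide, so widening the enum explicitly at the call site documents the intended width instead of relying on an implicit int promotion. The sketch below only illustrates that cast pattern; Operand and CodeKind here are stand-ins, not the V8 types:

#include <cstdint>
#include <cstdio>

enum class CodeKind : uint8_t { BASELINE = 3 };  // illustrative value

struct Operand {  // stand-in for an assembler immediate operand
  explicit Operand(int64_t immediate) : value(immediate) {}
  int64_t value;
};

int main() {
  // Explicitly widen the enum to the operand's 64-bit width.
  Operand op(static_cast<int64_t>(CodeKind::BASELINE));
  std::printf("%lld\n", static_cast<long long>(op.value));
  return 0;
}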
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 5ee2cf7c6a..65fffbba79 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -2091,14 +2091,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -----------------------------------
__ AssertFunction(r3);
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
- Label class_constructor;
__ LoadTaggedPointerField(
r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
- __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
- __ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
@@ -2107,6 +2101,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
+ __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ AndP(r0, r5,
Operand(SharedFunctionInfo::IsStrictBit::kMask |
SharedFunctionInfo::IsNativeBit::kMask));
@@ -2175,14 +2170,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadU16(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
- __ push(r3);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2286,34 +2273,48 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the target to call (can be any Object).
// -----------------------------------
-
- Label non_callable, non_smi;
- __ JumpIfSmi(r3, &non_callable);
- __ bind(&non_smi);
- __ LoadMap(r6, r3);
- __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
- LAST_JS_FUNCTION_TYPE);
+ Register argc = r2;
+ Register target = r3;
+ Register map = r6;
+ Register instance_type = r7;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CompareInstanceTypeRange(map, instance_type,
+ FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, le);
- __ CmpS64(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Call]] internal method.
- __ LoadU8(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r6, Map::Bits1::IsCallableBit::kShift);
- __ beq(&non_callable);
+ {
+ Register flags = r6;
+ __ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ map = no_reg;
+ __ TestBit(flags, Map::Bits1::IsCallableBit::kShift);
+ __ beq(&non_callable);
+ }
// Check if target is a proxy and call CallProxy external builtin
- __ CmpS64(r7, Operand(JS_PROXY_TYPE));
+ __ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ CmpS64(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ beq(&class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver the (original) target.
- __ StoreReceiver(r3, r2, r7);
+ __ StoreReceiver(target, argc, r7);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(r3, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2322,8 +2323,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
@@ -2393,31 +2404,41 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -- r5 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
+ Register argc = r2;
+ Register target = r3;
+ Register map = r6;
+ Register instance_type = r7;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
// Check if target is a Smi.
Label non_constructor, non_proxy;
- __ JumpIfSmi(r3, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadU8(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ TestBit(r4, Map::Bits1::IsConstructorBit::kShift);
- __ beq(&non_constructor);
+ __ LoadTaggedPointerField(map,
+ FieldMemOperand(target, HeapObject::kMapOffset));
+ {
+ Register flags = r4;
+ DCHECK(!AreAliased(argc, target, map, instance_type, flags));
+ __ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ TestBit(flags, Map::Bits1::IsConstructorBit::kShift);
+ __ beq(&non_constructor);
+ }
// Dispatch based on instance type.
- __ CompareInstanceTypeRange(r6, r7, FIRST_JS_FUNCTION_TYPE,
+ __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, le);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ CmpS64(r7, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
- __ CmpS64(r7, Operand(JS_PROXY_TYPE));
+ __ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
__ bne(&non_proxy);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
RelocInfo::CODE_TARGET);
@@ -2426,9 +2447,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
- __ StoreReceiver(r3, r2, r7);
+ __ StoreReceiver(target, argc, r7);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(r3, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 5ad0319f63..d61a2705fb 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -66,7 +66,7 @@ Handle<Code> BuildPlaceholder(Isolate* isolate, Builtin builtin) {
ExternalAssemblerBuffer(buffer, kBufferSize));
DCHECK(!masm.has_frame());
{
- FrameScope scope(&masm, StackFrame::NONE);
+ FrameScope frame_scope(&masm, StackFrame::NO_FRAME_TYPE);
// The contents of placeholder don't matter, as long as they don't create
// embedded constants or external references.
masm.Move(kJavaScriptCallCodeStartRegister, Smi::zero());
diff --git a/deps/v8/src/builtins/string-pad.tq b/deps/v8/src/builtins/string-pad.tq
index b95e68628a..6812a32b7d 100644
--- a/deps/v8/src/builtins/string-pad.tq
+++ b/deps/v8/src/builtins/string-pad.tq
@@ -22,7 +22,7 @@ transitioning macro StringPad(implicit context: Context)(
return receiverString;
}
const maxLength: Number = ToLength_Inline(arguments[0]);
- assert(IsNumberNormalized(maxLength));
+ dcheck(IsNumberNormalized(maxLength));
typeswitch (maxLength) {
case (smiMaxLength: Smi): {
@@ -49,7 +49,7 @@ transitioning macro StringPad(implicit context: Context)(
}
// Pad.
- assert(fillLength > 0);
+ dcheck(fillLength > 0);
// Throw if max_length is greater than String::kMaxLength.
if (!TaggedIsSmi(maxLength)) {
ThrowInvalidStringLength(context);
@@ -59,7 +59,7 @@ transitioning macro StringPad(implicit context: Context)(
if (smiMaxLength > SmiConstant(kStringMaxLength)) {
ThrowInvalidStringLength(context);
}
- assert(smiMaxLength > stringLength);
+ dcheck(smiMaxLength > stringLength);
const padLength: Smi = smiMaxLength - stringLength;
let padding: String;
@@ -85,11 +85,11 @@ transitioning macro StringPad(implicit context: Context)(
}
// Return result.
- assert(padLength == padding.length_smi);
+ dcheck(padLength == padding.length_smi);
if (variant == kStringPadStart) {
return padding + receiverString;
}
- assert(variant == kStringPadEnd);
+ dcheck(variant == kStringPadEnd);
return receiverString + padding;
}
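[editorial note] The StringPad fragments above outline the padding logic: compute how many characters are missing (padLength = maxLength - stringLength), build a filler of exactly that length, and attach it at the start (padStart) or the end (padEnd). A simplified C++ sketch under those assumptions; the real builtin also validates maxLength against the engine's maximum string length and rejects empty fill strings earlier:

#include <cstdio>
#include <string>

std::string PadSketch(const std::string& s, size_t max_length,
                      const std::string& fill, bool pad_start) {
  if (fill.empty() || max_length <= s.size()) return s;
  size_t pad_length = max_length - s.size();
  std::string padding;
  while (padding.size() < pad_length) padding += fill;  // repeat the filler
  padding.resize(pad_length);                           // then truncate
  return pad_start ? padding + s : s + padding;
}

int main() {
  std::printf("%s\n", PadSketch("7", 3, "0", true).c_str());    // prints 007
  std::printf("%s\n", PadSketch("ab", 5, "xy", false).c_str()); // prints abxyx
  return 0;
}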
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index e1e33eb53a..b5ced876b7 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -7,8 +7,8 @@ const kBuiltinName: constexpr string = 'String.prototype.repeat';
builtin StringRepeat(implicit context: Context)(
string: String, count: Smi): String {
- assert(count >= 0);
- assert(string != kEmptyString);
+ dcheck(count >= 0);
+ dcheck(string != kEmptyString);
let result: String = kEmptyString;
let powerOfTwoRepeats: String = string;
@@ -50,7 +50,7 @@ transitioning javascript builtin StringPrototypeRepeat(
return StringRepeat(s, n);
}
case (heapNum: HeapNumber): deferred {
- assert(IsNumberNormalized(heapNum));
+ dcheck(IsNumberNormalized(heapNum));
const n = LoadHeapNumberValue(heapNum);
// 4. If n < 0, throw a RangeError exception.
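[editorial note] The powerOfTwoRepeats local visible in the StringRepeat hunk suggests the usual doubling strategy: walk the count bit by bit, appending the current block when the bit is set and doubling the block between bits, so only O(log count) concatenations are needed. A hedged C++ sketch of that strategy, not a line-for-line translation of the Torque builtin:

#include <cstdio>
#include <string>

std::string RepeatSketch(std::string block, int count) {  // assumes count >= 0
  std::string result;
  while (count > 0) {
    if (count & 1) result += block;  // this power of two contributes to count
    count >>= 1;
    if (count > 0) block += block;   // double the block for the next bit
  }
  return result;
}

int main() {
  std::printf("%s\n", RepeatSketch("ab", 3).c_str());  // prints ababab
  return 0;
}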
diff --git a/deps/v8/src/builtins/string-substr.tq b/deps/v8/src/builtins/string-substr.tq
index 068c4437ca..9c0f63d085 100644
--- a/deps/v8/src/builtins/string-substr.tq
+++ b/deps/v8/src/builtins/string-substr.tq
@@ -27,7 +27,7 @@ transitioning javascript builtin StringPrototypeSubstr(
// 7. Let resultLength be min(max(end, 0), size - intStart).
const length = arguments[1];
const lengthLimit = size - initStart;
- assert(lengthLimit <= size);
+ dcheck(lengthLimit <= size);
const resultLength: uintptr = length != Undefined ?
ClampToIndexRange(length, lengthLimit) :
lengthLimit;
diff --git a/deps/v8/src/builtins/torque-csa-header-includes.h b/deps/v8/src/builtins/torque-csa-header-includes.h
index 879fda5bbe..750843e6db 100644
--- a/deps/v8/src/builtins/torque-csa-header-includes.h
+++ b/deps/v8/src/builtins/torque-csa-header-includes.h
@@ -14,6 +14,5 @@
#include "src/compiler/code-assembler.h"
#include "src/utils/utils.h"
#include "torque-generated/csa-types.h"
-#include "torque-generated/field-offsets.h"
#endif // V8_BUILTINS_TORQUE_CSA_HEADER_INCLUDES_H_
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index d9f05f5533..9fe503f5f5 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -231,19 +231,20 @@ const kAllocateBaseFlags: constexpr AllocationFlag =
AllocationFlag::kAllowLargeObjectAllocation;
macro AllocateFromNew(
sizeInBytes: intptr, map: Map, pretenured: bool): UninitializedHeapObject {
- assert(ValidAllocationSize(sizeInBytes, map));
+ dcheck(ValidAllocationSize(sizeInBytes, map));
if (pretenured) {
return Allocate(
sizeInBytes,
%RawConstexprCast<constexpr AllocationFlag>(
- kAllocateBaseFlags | AllocationFlag::kPretenured));
+ %RawConstexprCast<constexpr int32>(kAllocateBaseFlags) |
+ %RawConstexprCast<constexpr int32>(AllocationFlag::kPretenured)));
} else {
return Allocate(sizeInBytes, kAllocateBaseFlags);
}
}
macro InitializeFieldsFromIterator<T: type, Iterator: type>(
- target: MutableSlice<T>, originIterator: Iterator) {
+ target: MutableSlice<T>, originIterator: Iterator): void {
let targetIterator = target.Iterator();
let originIterator = originIterator;
while (true) {
@@ -253,12 +254,14 @@ macro InitializeFieldsFromIterator<T: type, Iterator: type>(
}
// Dummy implementations: do not initialize for UninitializedIterator.
InitializeFieldsFromIterator<char8, UninitializedIterator>(
- _target: MutableSlice<char8>, _originIterator: UninitializedIterator) {}
+ _target: MutableSlice<char8>,
+ _originIterator: UninitializedIterator): void {}
InitializeFieldsFromIterator<char16, UninitializedIterator>(
- _target: MutableSlice<char16>, _originIterator: UninitializedIterator) {}
+ _target: MutableSlice<char16>,
+ _originIterator: UninitializedIterator): void {}
extern macro IsDoubleHole(HeapObject, intptr): bool;
-extern macro StoreDoubleHole(HeapObject, intptr);
+extern macro StoreDoubleHole(HeapObject, intptr): void;
macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole {
return float64_or_hole{
@@ -267,7 +270,7 @@ macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole {
value: *unsafe::NewReference<float64>(r.object, r.offset)
};
}
-macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole) {
+macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole): void {
if (value.is_hole) {
StoreDoubleHole(
%RawDownCast<HeapObject>(r.object), r.offset - kHeapObjectTag);
@@ -297,12 +300,12 @@ macro DownCastForTorqueClass<T : type extends HeapObject>(o: HeapObject):
return %RawDownCast<T>(o);
}
-extern macro StaticAssert(bool, constexpr string);
+extern macro StaticAssert(bool, constexpr string): void;
// This is for the implementation of the dot operator. In any context where the
// dot operator is available, the correct way to get the length of an indexed
// field x from object o is `(&o.x).length`.
-intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
+intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string): intptr;
// If field x is defined as optional, then &o.x returns a reference to the field
// or crashes the program (unreachable) if the field is not present. Usually
@@ -311,7 +314,8 @@ intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string);
// optional field, which is either length zero or one depending on whether the
// field is present. This intrinsic provides Slices for both indexed fields
// (equivalent to &o.x) and optional fields.
-intrinsic %FieldSlice<T: type>(o: T, f: constexpr string);
+intrinsic %FieldSlice<T: type, TSlice: type>(
+ o: T, f: constexpr string): TSlice;
} // namespace torque_internal
@@ -321,7 +325,7 @@ struct UninitializedIterator {}
// %RawDownCast should *never* be used anywhere in Torque code except for
// in Torque-based UnsafeCast operators preceded by an appropriate
-// type assert()
+// type dcheck()
intrinsic %RawDownCast<To: type, From: type>(x: From): To;
intrinsic %RawConstexprCast<To: type, From: type>(f: From): To;
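
The AllocateFromNew change above routes both flag operands through %RawConstexprCast<constexpr int32> before OR-ing them, i.e. the flags are combined on their underlying integer type and cast back. The C++ sketch below illustrates the same pattern for a scoped enum; this AllocationFlag is a local stand-in with made-up bit values, not V8's definition.

// Combining scoped-enum flags via the underlying integer type, the same idea
// as the %RawConstexprCast dance in AllocateFromNew. Illustrative only.
#include <cstdint>
#include <iostream>
#include <type_traits>

enum class AllocationFlag : int32_t {
  kNone = 0,
  kDoubleAlignment = 1 << 0,
  kPretenured = 1 << 1,
  kAllowLargeObjectAllocation = 1 << 2,
};

constexpr AllocationFlag operator|(AllocationFlag a, AllocationFlag b) {
  using U = std::underlying_type_t<AllocationFlag>;
  return static_cast<AllocationFlag>(static_cast<U>(a) | static_cast<U>(b));
}

constexpr bool HasFlag(AllocationFlag value, AllocationFlag flag) {
  using U = std::underlying_type_t<AllocationFlag>;
  return (static_cast<U>(value) & static_cast<U>(flag)) != 0;
}

int main() {
  constexpr AllocationFlag base = AllocationFlag::kAllowLargeObjectAllocation;
  constexpr AllocationFlag pretenured = base | AllocationFlag::kPretenured;
  std::cout << HasFlag(pretenured, AllocationFlag::kPretenured) << "\n";  // 1
}
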
diff --git a/deps/v8/src/builtins/typed-array-at.tq b/deps/v8/src/builtins/typed-array-at.tq
index 6ec4730d94..cd7dcfdedd 100644
--- a/deps/v8/src/builtins/typed-array-at.tq
+++ b/deps/v8/src/builtins/typed-array-at.tq
@@ -8,9 +8,10 @@ transitioning javascript builtin TypedArrayPrototypeAt(
js-implicit context: NativeContext, receiver: JSAny)(index: JSAny): JSAny {
// 1. Let O be the this value.
// 2. Perform ? ValidateTypedArray(O).
- const o = ValidateTypedArray(context, receiver, '%TypedArray%.prototype.at');
- // 3. Let len be O.[[ArrayLength]].
- const len = Convert<Number>(o.length);
+ // 3. Let len be IntegerIndexedObjectLength(O).
+ const len = Convert<Number>(ValidateTypedArrayAndGetLength(
+ context, receiver, '%TypedArray%.prototype.at'));
+
// 4. Let relativeIndex be ? ToInteger(index).
const relativeIndex = ToInteger_Inline(index);
// 5. If relativeIndex ≥ 0, then
@@ -23,6 +24,6 @@ transitioning javascript builtin TypedArrayPrototypeAt(
return Undefined;
}
// 8. Return ? Get(O, ! ToString(k)).
- return GetProperty(o, k);
+ return GetProperty(receiver, k);
}
}
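
The rewritten TypedArrayPrototypeAt above loads the length once via ValidateTypedArrayAndGetLength and then applies the usual relative-index rules. The standalone C++ sketch below models just that index arithmetic, with std::optional standing in for undefined; it is not the Torque code.

// Sketch of the %TypedArray%.prototype.at index math: negative indices count
// from the end, out-of-range lookups yield "undefined" (std::nullopt here).
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

std::optional<double> TypedArrayAt(const std::vector<double>& elements,
                                   int64_t relative_index) {
  const int64_t len = static_cast<int64_t>(elements.size());
  // 5./6. k = relativeIndex for non-negative indices, len + relativeIndex otherwise.
  const int64_t k = relative_index >= 0 ? relative_index : len + relative_index;
  // 7. If k < 0 or k >= len, return undefined.
  if (k < 0 || k >= len) return std::nullopt;
  // 8. Return the element at k.
  return elements[static_cast<size_t>(k)];
}

int main() {
  std::vector<double> a{10, 20, 30};
  std::cout << TypedArrayAt(a, -1).value_or(-1) << "\n";  // 30
  std::cout << TypedArrayAt(a, 3).has_value() << "\n";    // 0 (undefined)
}
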
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index cb3443284d..519d98867b 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -28,8 +28,8 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
isLengthTracking: bool): JSTypedArray {
let elements: ByteArray;
if constexpr (isOnHeap) {
- assert(!IsResizableArrayBuffer(buffer));
- assert(!isLengthTracking);
+ dcheck(!IsResizableArrayBuffer(buffer));
+ dcheck(!isLengthTracking);
elements = AllocateByteArray(byteLength);
} else {
elements = kEmptyByteArray;
@@ -44,7 +44,7 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
// allocator is NOT used. When the mock array buffer is used, impossibly
// large allocations are allowed that would erroneously cause an overflow
// and this assertion to fail.
- assert(
+ dcheck(
IsMockArrayBufferAllocatorFlag() ||
(backingStore + byteOffset) >= backingStore);
}
@@ -67,7 +67,7 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
} else {
typed_array::SetJSTypedArrayOffHeapDataPtr(
typedArray, buffer.backing_store_ptr, byteOffset);
- assert(
+ dcheck(
typedArray.data_ptr ==
(buffer.backing_store_ptr + Convert<intptr>(byteOffset)));
}
@@ -164,7 +164,7 @@ transitioning macro ConstructByArrayLike(implicit context: Context)(
} else if (length > 0) {
const byteLength = typedArray.byte_length;
- assert(byteLength <= kArrayBufferMaxByteLength);
+ dcheck(byteLength <= kArrayBufferMaxByteLength);
if (IsSharedArrayBuffer(src.buffer)) {
typed_array::CallCRelaxedMemcpy(
typedArray.data_ptr, src.data_ptr, byteLength);
@@ -326,7 +326,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
transitioning macro TypedArrayCreateByLength(implicit context: Context)(
constructor: Constructor, length: Number, methodName: constexpr string):
JSTypedArray {
- assert(IsSafeInteger(length));
+ dcheck(IsSafeInteger(length));
// 1. Let newTypedArray be ? Construct(constructor, argumentList).
const newTypedArrayObj = Construct(constructor, length);
@@ -384,7 +384,7 @@ transitioning macro ConstructByJSReceiver(implicit context: Context)(
transitioning builtin CreateTypedArray(
context: Context, target: JSFunction, newTarget: JSReceiver, arg1: JSAny,
arg2: JSAny, arg3: JSAny): JSTypedArray {
- assert(IsConstructor(target));
+ dcheck(IsConstructor(target));
// 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
// "%TypedArrayPrototype%").
try {
@@ -441,7 +441,7 @@ transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
// It is assumed that the CreateTypedArray builtin does not produce a
// typed array that fails ValidateTypedArray
- assert(!IsDetachedBuffer(typedArray.buffer));
+ dcheck(!IsDetachedBuffer(typedArray.buffer));
return typedArray;
} label IfSlow deferred {
@@ -455,7 +455,7 @@ transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
if constexpr (numArgs == 1) {
newObj = Construct(constructor, arg0);
} else {
- assert(numArgs == 3);
+ dcheck(numArgs == 3);
newObj = Construct(constructor, arg0, arg1, arg2);
}
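
One of the converted dchecks above, (backingStore + byteOffset) >= backingStore, is an unsigned-overflow check: if adding the offset wraps around, the sum ends up smaller than the base. A minimal C++ illustration, with uintptr_t values standing in for the real backing-store pointer:

// Detecting wraparound when adding an offset to a base, as the AllocateTypedArray
// dcheck does. Illustrative only.
#include <cstdint>
#include <iostream>
#include <limits>

bool OffsetOverflows(uintptr_t backing_store, uintptr_t byte_offset) {
  const uintptr_t sum = backing_store + byte_offset;  // unsigned arithmetic wraps
  return sum < backing_store;                         // wrapped => overflow
}

int main() {
  std::cout << OffsetOverflows(0x1000, 0x10) << "\n";                              // 0, fits
  std::cout << OffsetOverflows(std::numeric_limits<uintptr_t>::max(), 2) << "\n";  // 1
}
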
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index 8c662bffb7..f2701a040b 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -9,11 +9,11 @@ const kBuiltinNameEvery: constexpr string = '%TypedArray%.prototype.every';
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning macro EveryAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Boolean {
+ array: typed_array::AttachedJSTypedArray, length: uintptr,
+ callbackfn: Callable, thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
+ // 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
// 6a. Let Pk be ! ToString(𝔽(k)).
@@ -23,9 +23,9 @@ transitioning macro EveryAllElements(implicit context: Context)(
// kValue must be undefined when the buffer is detached.
let value: JSAny;
try {
- witness.Recheck() otherwise goto IsDetached;
+ witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
value = witness.Load(k);
- } label IsDetached deferred {
+ } label IsDetachedOrOutOfBounds deferred {
value = Undefined;
}
@@ -54,19 +54,24 @@ TypedArrayPrototypeEvery(
// arguments[0] = callback
// arguments[1] = thisArg
try {
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ // 3. Let len be IntegerIndexedObjectLength(O).
const array: JSTypedArray = Cast<JSTypedArray>(receiver)
otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+ const length = LoadJSTypedArrayLengthAndCheckDetached(array)
+ otherwise IsDetachedOrOutOfBounds;
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return EveryAllElements(uarray, callbackfn, thisArg);
- } label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ return EveryAllElements(
+ %RawDownCast<AttachedJSTypedArray>(array), length, callbackfn, thisArg);
} label NotTypedArray deferred {
ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameEvery);
- } label IsDetached deferred {
+ } label IsDetachedOrOutOfBounds deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameEvery);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
}
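
The every builtin now takes the length as a parameter and re-validates each access with RecheckIndex, because a resizable buffer can shrink while the user callback runs; detached or out-of-bounds reads are treated as undefined. The C++ sketch below models that per-iteration re-check with a std::vector the callback may resize; the names are illustrative, not the Torque witness API.

// Per-iteration length re-check, in the spirit of RecheckIndex. Not V8 code.
#include <functional>
#include <iostream>
#include <optional>
#include <vector>

using Element = std::optional<double>;  // nullopt plays the role of undefined

bool Every(std::vector<double>* backing,  // may be resized by the callback
           size_t initial_length,
           const std::function<bool(Element, size_t)>& callbackfn) {
  for (size_t k = 0; k < initial_length; ++k) {
    Element value;
    // RecheckIndex(k): reload the current length and verify k is still valid.
    if (backing != nullptr && k < backing->size()) {
      value = (*backing)[k];
    } else {
      value = std::nullopt;  // detached or shrunk out from under us
    }
    if (!callbackfn(value, k)) return false;
  }
  return true;
}

int main() {
  std::vector<double> data{1, 2, 3, 4};
  bool all_defined = Every(&data, data.size(), [&](Element v, size_t k) {
    if (k == 0) data.resize(2);  // simulate the buffer shrinking mid-iteration
    return v.has_value();
  });
  std::cout << all_defined << "\n";  // 0: indices 2 and 3 read as undefined
}
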
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index eeb521e3f6..e40ff9f737 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -255,8 +255,8 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)(
otherwise unreachable;
const dstPtr: RawPtr = target.data_ptr + Convert<intptr>(startOffset);
- assert(countBytes <= target.byte_length - startOffset);
- assert(countBytes <= typedArray.byte_length);
+ dcheck(countBytes <= target.byte_length - startOffset);
+ dcheck(countBytes <= typedArray.byte_length);
// 29. If srcType is the same as targetType, then
// a. NOTE: If srcType and targetType are the same, the transfer must
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index 2a18433f93..356bf36d4c 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -12,7 +12,7 @@ extern macro TypedArrayBuiltinsAssembler::CallCCopyTypedArrayElementsSlice(
macro FastCopy(
src: typed_array::AttachedJSTypedArray, dest: JSTypedArray, k: uintptr,
- count: uintptr) labels IfSlow {
+ count: uintptr): void labels IfSlow {
if (IsForceSlowPath()) goto IfSlow;
const srcKind: ElementsKind = src.elements_kind;
@@ -22,7 +22,10 @@ macro FastCopy(
// with the src because of custom species constructor. If the types
// of src and result array are the same and they are not sharing the
// same buffer, use memmove.
- if (srcKind != destInfo.kind) goto IfSlow;
+ if (srcKind != destInfo.kind) {
+ // TODO(v8:11111): Enable the fast branch for RAB / GSAB.
+ goto IfSlow;
+ }
if (dest.buffer == src.buffer) {
goto IfSlow;
}
@@ -33,8 +36,8 @@ macro FastCopy(
otherwise unreachable;
const srcPtr: RawPtr = src.data_ptr + Convert<intptr>(startOffset);
- assert(countBytes <= dest.byte_length);
- assert(countBytes <= src.byte_length - startOffset);
+ dcheck(countBytes <= dest.byte_length);
+ dcheck(countBytes <= src.byte_length - startOffset);
if (IsSharedArrayBuffer(src.buffer)) {
// SABs need a relaxed memmove to preserve atomicity.
@@ -45,7 +48,7 @@ macro FastCopy(
}
macro SlowCopy(implicit context: Context)(
- src: JSTypedArray, dest: JSTypedArray, k: uintptr, final: uintptr) {
+ src: JSTypedArray, dest: JSTypedArray, k: uintptr, final: uintptr): void {
if (typed_array::IsBigInt64ElementsKind(src.elements_kind) !=
typed_array::IsBigInt64ElementsKind(dest.elements_kind))
deferred {
@@ -63,11 +66,10 @@ transitioning javascript builtin TypedArrayPrototypeSlice(
// 1. Let O be the this value.
// 2. Perform ? ValidateTypedArray(O).
- const src: JSTypedArray =
- ValidateTypedArray(context, receiver, kBuiltinNameSlice);
-
// 3. Let len be O.[[ArrayLength]].
- const len: uintptr = src.length;
+ const len =
+ ValidateTypedArrayAndGetLength(context, receiver, kBuiltinNameSlice);
+ const src: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
// 4. Let relativeStart be ? ToInteger(start).
// 5. If relativeStart < 0, let k be max((len + relativeStart), 0);
@@ -81,11 +83,11 @@ transitioning javascript builtin TypedArrayPrototypeSlice(
// 7. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
// else let final be min(relativeEnd, len).
const end = arguments[1];
- const final: uintptr =
+ let final: uintptr =
end != Undefined ? ConvertToRelativeIndex(end, len) : len;
// 8. Let count be max(final - k, 0).
- const count: uintptr = Unsigned(IntPtrMax(Signed(final - k), 0));
+ let count: uintptr = Unsigned(IntPtrMax(Signed(final - k), 0));
// 9. Let A be ? TypedArraySpeciesCreate(O, « count »).
const dest: JSTypedArray =
@@ -93,9 +95,19 @@ transitioning javascript builtin TypedArrayPrototypeSlice(
if (count > 0) {
try {
- const srcAttached = typed_array::EnsureAttached(src)
- otherwise IfDetached;
- FastCopy(srcAttached, dest, k, count) otherwise IfSlow;
+ const newLength =
+ LoadJSTypedArrayLengthAndCheckDetached(src) otherwise IfDetached;
+ // If the backing buffer is a RAB, it's possible that the length has
+ // decreased since the last time we loaded it.
+ if (k >= newLength) {
+ return dest;
+ }
+ if (final > newLength) {
+ final = newLength;
+ count = Unsigned(IntPtrMax(Signed(final - k), 0));
+ }
+ FastCopy(%RawDownCast<AttachedJSTypedArray>(src), dest, k, count)
+ otherwise IfSlow;
} label IfDetached deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSlice);
} label IfSlow deferred {
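
The new slice code above re-loads the length after TypedArraySpeciesCreate and re-clamps k, final and count, since the species constructor can shrink a resizable backing buffer before the copy. A standalone C++ sketch of that re-clamping, using a plain vector as the already-shrunk source:

// Re-clamping a slice after the source may have shrunk. Illustrative only.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<double> Slice(const std::vector<double>& src_at_copy_time,
                          size_t k, size_t final) {
  const size_t new_length = src_at_copy_time.size();
  if (k >= new_length) return {};                  // nothing left to copy
  final = std::min(final, new_length);             // clamp the end to the new length
  const size_t count = final > k ? final - k : 0;  // max(final - k, 0)
  return std::vector<double>(src_at_copy_time.begin() + k,
                             src_at_copy_time.begin() + k + count);
}

int main() {
  // Originally k = 1 and final = 6 were requested, but the buffer shrank to 4
  // elements before the copy happened.
  std::vector<double> shrunk{0, 1, 2, 3};
  for (double v : Slice(shrunk, 1, 6)) std::cout << v << " ";  // 1 2 3
  std::cout << "\n";
}
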
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index 9946907680..d9f37937b4 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -9,11 +9,11 @@ const kBuiltinNameSome: constexpr string = '%TypedArray%.prototype.some';
// https://tc39.es/ecma262/#sec-%typedarray%.prototype.some
transitioning macro SomeAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
- thisArg: JSAny): Boolean {
+ array: typed_array::AttachedJSTypedArray, length: uintptr,
+ callbackfn: Callable, thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: uintptr = witness.Get().length;
+ // 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
// 6a. Let Pk be ! ToString(𝔽(k)).
@@ -23,9 +23,9 @@ transitioning macro SomeAllElements(implicit context: Context)(
// kValue must be undefined when the buffer is detached.
let value: JSAny;
try {
- witness.Recheck() otherwise goto IsDetached;
+ witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
value = witness.Load(k);
- } label IsDetached deferred {
+ } label IsDetachedOrOutOfBounds deferred {
value = Undefined;
}
@@ -54,22 +54,26 @@ transitioning javascript builtin
TypedArrayPrototypeSome(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
// arguments[0] = callback
- // arguments[1] = thisArg.
+ // arguments[1] = thisArg
try {
+ // 1. Let O be the this value.
+ // 2. Perform ? ValidateTypedArray(O).
+ // 3. Let len be IntegerIndexedObjectLength(O).
const array: JSTypedArray = Cast<JSTypedArray>(receiver)
otherwise NotTypedArray;
- const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+ const length = LoadJSTypedArrayLengthAndCheckDetached(array)
+ otherwise IsDetachedOrOutOfBounds;
+ // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
-
- return SomeAllElements(uarray, callbackfn, thisArg);
- } label NotCallable deferred {
- ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
+ return SomeAllElements(
+ %RawDownCast<AttachedJSTypedArray>(array), length, callbackfn, thisArg);
} label NotTypedArray deferred {
ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameSome);
- } label IsDetached deferred {
+ } label IsDetachedOrOutOfBounds deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSome);
+ } label NotCallable deferred {
+ ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
}
}
}
diff --git a/deps/v8/src/builtins/typed-array-sort.tq b/deps/v8/src/builtins/typed-array-sort.tq
index 614852f444..1487d1396f 100644
--- a/deps/v8/src/builtins/typed-array-sort.tq
+++ b/deps/v8/src/builtins/typed-array-sort.tq
@@ -33,7 +33,7 @@ transitioning macro
TypedArrayMerge(
implicit context: Context, array: JSTypedArray, comparefn: Callable)(
source: FixedArray, from: uintptr, middle: uintptr, to: uintptr,
- target: FixedArray) {
+ target: FixedArray): void {
let left: uintptr = from;
let right: uintptr = middle;
@@ -56,7 +56,7 @@ TypedArrayMerge(
} else {
// No elements on the left, but the right does, so we take
// from the right.
- assert(left == middle);
+ dcheck(left == middle);
target.objects[targetIndex] = source.objects[right++];
}
}
@@ -66,7 +66,7 @@ transitioning builtin
TypedArrayMergeSort(implicit context: Context)(
source: FixedArray, from: uintptr, to: uintptr, target: FixedArray,
array: JSTypedArray, comparefn: Callable): JSAny {
- assert(to - from > 1);
+ dcheck(to - from > 1);
const middle: uintptr = from + ((to - from) >>> 1);
// On the next recursion step source becomes target and vice versa.
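
TypedArrayMerge, touched above, merges two sorted sub-ranges [from, middle) and [middle, to) of source into target; the dcheck(left == middle) covers the case where only the right run can still supply elements. The following C++ sketch shows the same merge step with plain doubles and no comparefn; it is illustrative, not the Torque macro.

// Merge of two adjacent sorted runs into a target buffer. Not V8 code.
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

void Merge(const std::vector<double>& source, size_t from, size_t middle,
           size_t to, std::vector<double>* target) {
  size_t left = from;
  size_t right = middle;
  for (size_t target_index = from; target_index < to; ++target_index) {
    if (left < middle && (right >= to || source[left] <= source[right])) {
      (*target)[target_index] = source[left++];   // take from the left run
    } else {
      assert(left == middle || source[right] < source[left]);
      (*target)[target_index] = source[right++];  // take from the right run
    }
  }
}

int main() {
  std::vector<double> source{1, 4, 7, 2, 3, 9};
  std::vector<double> target(source.size());
  Merge(source, 0, 3, 6, &target);
  for (double v : target) std::cout << v << " ";  // 1 2 3 4 7 9
  std::cout << "\n";
}
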
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 582388b75d..c64573cb3b 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -19,6 +19,7 @@ type Float64Elements extends ElementsKind;
type Uint8ClampedElements extends ElementsKind;
type BigUint64Elements extends ElementsKind;
type BigInt64Elements extends ElementsKind;
+type RabGsabUint8Elements extends ElementsKind;
@export
struct TypedArrayElementsInfo {
@@ -56,6 +57,8 @@ extern runtime TypedArrayCopyElements(
Context, JSTypedArray, Object, Number): void;
extern macro TypedArrayBuiltinsAssembler::ValidateTypedArray(
Context, JSAny, constexpr string): JSTypedArray;
+extern macro TypedArrayBuiltinsAssembler::ValidateTypedArrayAndGetLength(
+ Context, JSAny, constexpr string): uintptr;
extern macro TypedArrayBuiltinsAssembler::CallCMemcpy(
RawPtr, RawPtr, uintptr): void;
@@ -80,9 +83,12 @@ extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(ElementsKind):
extern macro LoadFixedTypedArrayElementAsTagged(
RawPtr, uintptr, constexpr ElementsKind): Numeric;
extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromNumeric(
- Context, JSTypedArray, uintptr, Numeric, constexpr ElementsKind);
+ Context, JSTypedArray, uintptr, Numeric, constexpr ElementsKind): void;
extern macro TypedArrayBuiltinsAssembler::StoreJSTypedArrayElementFromTagged(
- Context, JSTypedArray, uintptr, JSAny, constexpr ElementsKind)
+ Context, JSTypedArray, uintptr, JSAny,
+ constexpr ElementsKind): void labels IfDetached;
+
+extern macro LoadJSTypedArrayLengthAndCheckDetached(JSTypedArray): uintptr
labels IfDetached;
type LoadNumericFn = builtin(JSTypedArray, uintptr) => Numeric;
@@ -100,21 +106,22 @@ struct TypedArrayAccessor {
}
macro StoreNumeric(
- context: Context, array: JSTypedArray, index: uintptr, value: Numeric) {
+ context: Context, array: JSTypedArray, index: uintptr,
+ value: Numeric): void {
const storefn: StoreNumericFn = this.storeNumericFn;
const result = storefn(context, array, index, value);
- assert(result == kStoreSucceded);
+ dcheck(result == kStoreSucceded);
}
macro StoreJSAny(
- context: Context, array: JSTypedArray, index: uintptr, value: JSAny)
- labels IfDetached {
+ context: Context, array: JSTypedArray, index: uintptr,
+ value: JSAny): void labels IfDetached {
const storefn: StoreJSAnyFn = this.storeJSAnyFn;
const result = storefn(context, array, index, value);
if (result == kStoreFailureArrayDetached) {
goto IfDetached;
}
- assert(result == kStoreSucceded);
+ dcheck(result == kStoreSucceded);
}
loadNumericFn: LoadNumericFn;
@@ -130,7 +137,15 @@ macro GetTypedArrayAccessor<T : type extends ElementsKind>():
return TypedArrayAccessor{loadNumericFn, storeNumericFn, storeJSAnyFn};
}
-macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
+macro GetTypedArrayAccessor(elementsKindParam: ElementsKind):
+ TypedArrayAccessor {
+ let elementsKind = elementsKindParam;
+ if (IsElementsKindGreaterThanOrEqual(
+ elementsKind, kFirstRabGsabFixedTypedArrayElementsKind)) {
+ elementsKind = %RawDownCast<ElementsKind>(
+ elementsKind - kFirstRabGsabFixedTypedArrayElementsKind +
+ kFirstFixedTypedArrayElementsKind);
+ }
if (IsElementsKindGreaterThan(elementsKind, ElementsKind::UINT32_ELEMENTS)) {
if (elementsKind == ElementsKind::INT32_ELEMENTS) {
return GetTypedArrayAccessor<Int32Elements>();
@@ -165,14 +180,19 @@ extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
JSTypedArray, ByteArray, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
JSTypedArray, RawPtr, uintptr): void;
+extern macro IsJSTypedArrayDetachedOrOutOfBounds(JSTypedArray):
+ never labels Detached, NotDetached;
// AttachedJSTypedArray guards that the array's buffer is not detached.
transient type AttachedJSTypedArray extends JSTypedArray;
macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray
labels Detached {
- if (IsDetachedBuffer(array.buffer)) goto Detached;
- return %RawDownCast<AttachedJSTypedArray>(array);
+ try {
+ IsJSTypedArrayDetachedOrOutOfBounds(array) otherwise Detached, NotDetached;
+ } label NotDetached {
+ return %RawDownCast<AttachedJSTypedArray>(array);
+ }
}
struct AttachedJSTypedArrayWitness {
@@ -184,11 +204,21 @@ struct AttachedJSTypedArrayWitness {
return this.stable;
}
- macro Recheck() labels Detached {
+ // TODO(v8:11111): Migrate users to use RecheckIndex.
+ macro Recheck(): void labels Detached {
if (IsDetachedBuffer(this.stable.buffer)) goto Detached;
this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
}
+ macro RecheckIndex(index: uintptr): void labels DetachedOrOutOfBounds {
+ const length = LoadJSTypedArrayLengthAndCheckDetached(this.stable)
+ otherwise DetachedOrOutOfBounds;
+ if (index >= length) {
+ goto DetachedOrOutOfBounds;
+ }
+ this.unstable = %RawDownCast<AttachedJSTypedArray>(this.stable);
+ }
+
macro Load(implicit context: Context)(k: uintptr): JSAny {
const lf: LoadNumericFn = this.loadfn;
return lf(this.unstable, k);
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index ec786311be..e4aea1446d 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -160,7 +160,7 @@ builtin WasmTableGet(tableIndex: intptr, index: int32): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const entryIndex: intptr = ChangeInt32ToIntPtr(index);
try {
- assert(IsValidPositiveSmi(tableIndex));
+ dcheck(IsValidPositiveSmi(tableIndex));
if (!IsValidPositiveSmi(entryIndex)) goto IndexOutOfRange;
const tables: FixedArray = LoadTablesFromInstance(instance);
@@ -193,7 +193,7 @@ builtin WasmTableSet(tableIndex: intptr, index: int32, value: Object): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const entryIndex: intptr = ChangeInt32ToIntPtr(index);
try {
- assert(IsValidPositiveSmi(tableIndex));
+ dcheck(IsValidPositiveSmi(tableIndex));
if (!IsValidPositiveSmi(entryIndex)) goto IndexOutOfRange;
const tables: FixedArray = LoadTablesFromInstance(instance);
@@ -364,6 +364,7 @@ builtin WasmArrayCopyWithChecks(
srcIndex + length > srcArray.length || srcIndex + length < srcIndex) {
tail ThrowWasmTrapArrayOutOfBounds();
}
+ if (length == 0) return Undefined;
tail runtime::WasmArrayCopy(
LoadContextFromFrame(), dstArray, SmiFromUint32(dstIndex), srcArray,
SmiFromUint32(srcIndex), SmiFromUint32(length));
@@ -371,7 +372,7 @@ builtin WasmArrayCopyWithChecks(
// Redeclaration with different typing (value is an Object, not JSAny).
extern transitioning runtime
-CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object);
+CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object): void;
transitioning builtin WasmAllocateObjectWrapper(implicit context: Context)(
obj: Object): JSObject {
@@ -412,7 +413,7 @@ builtin UintPtr53ToNumber(value: uintptr): Number {
const valueFloat = ChangeUintPtrToFloat64(value);
// Values need to be within [0..2^53], such that they can be represented as
// float64.
- assert(ChangeFloat64ToUintPtr(valueFloat) == value);
+ dcheck(ChangeFloat64ToUintPtr(valueFloat) == value);
return AllocateHeapNumberWithValue(valueFloat);
}
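
UintPtr53ToNumber's converted dcheck above asserts that the value round-trips through float64 unchanged, i.e. that it fits in the 53-bit mantissa. A standalone C++ check of the same property (names here are illustrative):

// Lossless float64 round-trip check for a 64-bit unsigned value. Not V8 code.
#include <cstdint>
#include <iostream>

bool FitsInFloat64Exactly(uint64_t value) {
  const double as_double = static_cast<double>(value);
  return static_cast<uint64_t>(as_double) == value;  // round-trip must be lossless
}

int main() {
  std::cout << FitsInFloat64Exactly(1ull << 53) << "\n";        // 1 (2^53 is exact)
  std::cout << FitsInFloat64Exactly((1ull << 53) + 1) << "\n";  // 0 (rounds away)
}
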
diff --git a/deps/v8/src/builtins/weak-ref.tq b/deps/v8/src/builtins/weak-ref.tq
index 18385e52db..56d3fc1c43 100644
--- a/deps/v8/src/builtins/weak-ref.tq
+++ b/deps/v8/src/builtins/weak-ref.tq
@@ -4,7 +4,8 @@
namespace runtime {
-extern runtime JSWeakRefAddToKeptObjects(implicit context: Context)(JSReceiver);
+extern runtime JSWeakRefAddToKeptObjects(implicit context: Context)(JSReceiver):
+ void;
} // namespace runtime
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index f5ef0877bc..f2f3624361 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -1828,7 +1828,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Push the baseline code return address now, as if it had been pushed by
// the call to this builtin.
__ PushReturnAddressFrom(return_address);
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameScope inner_frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
__ Push(new_target);
__ SmiTag(frame_size);
@@ -2377,15 +2377,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
StackArgumentsAccessor args(rax);
__ AssertFunction(rdi);
- // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- // Check that the function is not a "classConstructor".
- Label class_constructor;
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
- Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ j(not_zero, &class_constructor);
-
// ----------- S t a t e -------------
// -- rax : the number of arguments
// -- rdx : the shared function info.
@@ -2470,14 +2463,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
-
- // The function is a "classConstructor", need to raise an exception.
- __ bind(&class_constructor);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
- }
}
namespace {
@@ -2589,36 +2574,48 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// -- rax : the number of arguments
// -- rdi : the target to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rax);
+ Register argc = rax;
+ Register target = rdi;
+ Register map = rcx;
+ Register instance_type = rdx;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ StackArgumentsAccessor args(argc);
- Label non_callable;
- __ JumpIfSmi(rdi, &non_callable);
- __ LoadMap(rcx, rdi);
- __ CmpInstanceTypeRange(rcx, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
+ Label non_callable, class_constructor;
+ __ JumpIfSmi(target, &non_callable);
+ __ LoadMap(map, target);
+ __ CmpInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, below_equal);
- __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
RelocInfo::CODE_TARGET, equal);
// Check if target has a [[Call]] internal method.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsCallableBit::kMask));
__ j(zero, &non_callable, Label::kNear);
// Check if target is a proxy and call CallProxy external builtin
- __ CmpInstanceType(rcx, JS_PROXY_TYPE);
+ __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET,
equal);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ __ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
+ __ j(equal, &class_constructor);
+
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
// Overwrite the original receiver with the (original) target.
- __ movq(args.GetReceiverOperand(), rdi);
+ __ movq(args.GetReceiverOperand(), target);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadNativeContextSlot(rdi, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2627,8 +2624,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
+ __ Push(target);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
+ __ Trap(); // Unreachable.
+ }
+
+ // 4. The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(target);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ __ Trap(); // Unreachable.
}
}
@@ -2695,40 +2702,48 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -----------------------------------
- StackArgumentsAccessor args(rax);
+ Register argc = rax;
+ Register target = rdi;
+ Register map = rcx;
+ Register instance_type = r8;
+ DCHECK(!AreAliased(argc, target, map, instance_type));
+
+ StackArgumentsAccessor args(argc);
// Check if target is a Smi.
Label non_constructor;
- __ JumpIfSmi(rdi, &non_constructor);
+ __ JumpIfSmi(target, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadMap(rcx, rdi);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ __ LoadMap(map, target);
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
// Dispatch based on instance type.
- __ CmpInstanceTypeRange(rcx, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
+ __ CmpInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
RelocInfo::CODE_TARGET, below_equal);
// Only dispatch to bound functions after checking whether they are
// constructors.
- __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
RelocInfo::CODE_TARGET, equal);
// Only dispatch to proxies after checking whether they are constructors.
- __ CmpInstanceType(rcx, JS_PROXY_TYPE);
+ __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET,
equal);
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
- __ movq(args.GetReceiverOperand(), rdi);
+ __ movq(args.GetReceiverOperand(), target);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(rdi, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ LoadNativeContextSlot(target,
+ Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
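
The rewritten Generate_Call above moves the class-constructor rejection out of CallFunction and dispatches on the instance type loaded by CmpInstanceTypeRange: callable JSFunctions, bound functions, proxies, class constructors, other callables and non-callables each take their own path. The C++ sketch below captures only that dispatch order at a high level; the enum values and strings are illustrative, not V8's instance types or builtins.

// High-level sketch of the Call dispatch order. Illustrative only.
#include <iostream>
#include <string>

enum class InstanceType {
  kCallableJSFunction,
  kBoundFunction,
  kProxy,
  kClassConstructor,
  kOtherCallable,
  kNotCallable,
};

std::string Call(InstanceType type) {
  switch (type) {
    case InstanceType::kCallableJSFunction:
      return "CallFunction builtin";
    case InstanceType::kBoundFunction:
      return "CallBoundFunction builtin";
    case InstanceType::kProxy:
      return "CallProxy builtin";
    case InstanceType::kClassConstructor:
      return "throw TypeError: class constructors are not callable";
    case InstanceType::kOtherCallable:
      return "call_as_function_delegate";
    case InstanceType::kNotCallable:
      return "throw TypeError: called non-callable";
  }
  return "unreachable";
}

int main() {
  std::cout << Call(InstanceType::kClassConstructor) << "\n";
}
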
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index f72e27703e..2c0e69a753 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -101,7 +101,7 @@ HeapObject RelocInfo::target_object() {
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index adeb8748ce..aebfaab932 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -1642,8 +1642,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- b(eq, &regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ b(eq, &regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1692,8 +1694,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1707,7 +1709,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
ASM_CODE_COMMENT(this);
// Load receiver to pass it later to DebugOnFunctionCall hook.
ldr(r4, ReceiverOperand(actual_parameter_count));
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1874,16 +1877,26 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
cmp(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ sub(scratch, value, Operand(lower_limit));
+ cmp(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ cmp(value, Operand(higher_limit));
+ }
+}
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
ASM_CODE_COMMENT(this);
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- sub(scratch, type_reg, Operand(lower_limit));
- cmp(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
@@ -1898,14 +1911,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
ASM_CODE_COMMENT(this);
- if (lower_limit != 0) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- sub(scratch, value, Operand(lower_limit));
- cmp(scratch, Operand(higher_limit - lower_limit));
- } else {
- cmp(value, Operand(higher_limit));
- }
+ CompareRange(value, lower_limit, higher_limit);
b(ls, on_in_range);
}
@@ -2089,7 +2095,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, 0, r1);
Move(r1, ExternalReference::abort_with_reason());
@@ -2105,7 +2111,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
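
The new CompareRange helper above folds a two-sided range check into one unsigned comparison: after subtracting lower_limit, any value below the range wraps to a large unsigned number and fails the single <= test (hence the ls condition). A standalone C++ sketch of the same trick:

// Single-comparison range check, the idea behind CompareRange. Illustrative only.
#include <cstdint>
#include <iostream>

bool IsInRange(uint32_t value, uint32_t lower_limit, uint32_t higher_limit) {
  // Equivalent to lower_limit <= value && value <= higher_limit,
  // but needs only one comparison after the subtraction.
  return value - lower_limit <= higher_limit - lower_limit;
}

int main() {
  std::cout << IsInRange(5, 3, 9) << "\n";   // 1
  std::cout << IsInRange(2, 3, 9) << "\n";   // 0 (2 - 3 wraps to a huge value)
  std::cout << IsInRange(10, 3, 9) << "\n";  // 0
}
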
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index bcecaec429..3dc3e208f5 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -518,6 +518,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
mov(dst, Operand::SmiUntag(src), s);
}
+ void SmiToInt32(Register smi) { SmiUntag(smi); }
+
// Load an object from the root table.
void LoadRoot(Register destination, RootIndex index) final {
LoadRoot(destination, index, al);
@@ -755,7 +757,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Checks if value is in range [lower_limit, higher_limit] using a single
- // comparison.
+ // comparison. Flags C=0 or Z=1 indicate the value is in the range (condition
+ // ls).
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index 2668502f81..41d07b10b1 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -670,10 +670,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 8986df823a..48b8a5f06a 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -1091,6 +1091,19 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
+void TurboAssembler::SmiToInt32(Register smi) {
+ DCHECK(smi.Is64Bits());
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ Asr(smi.W(), smi.W(), kSmiShift);
+ } else {
+ Lsr(smi, smi, kSmiShift);
+ }
+}
+
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
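
SmiToInt32, added above, untags a Smi with a right shift: a small shift on the low 32 bits when pointers are compressed (31-bit Smis), or a 64-bit shift by 32 otherwise (the value lives in the upper word). The C++ sketch below mirrors that idea; the shift constants and helper names are illustrative assumptions, not V8's kSmiShift definitions.

// Tagging and untagging integers as Smis under the two common layouts.
// Illustrative only; constants are assumptions, not V8's.
#include <cstdint>
#include <iostream>

constexpr int kSmiShift31 = 1;   // 31-bit Smis: value in bits 1..31 of a 32-bit word
constexpr int kSmiShift32 = 32;  // 32-bit Smis: value in the upper half of 64 bits

int32_t TagSmi31(int32_t value) {
  // Shift on the unsigned representation to sidestep signed-shift pitfalls.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiShift31);
}
int64_t TagSmi32(int32_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(static_cast<int64_t>(value))
                              << kSmiShift32);
}
int32_t UntagSmi31(int32_t tagged) { return tagged >> kSmiShift31; }  // arithmetic shift
int32_t UntagSmi32(int64_t tagged) {
  return static_cast<int32_t>(tagged >> kSmiShift32);
}

int main() {
  std::cout << UntagSmi31(TagSmi31(-42)) << "\n";   // -42
  std::cout << UntagSmi32(TagSmi32(1234)) << "\n";  // 1234
}
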
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 36efeb3d75..91d972ea00 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -256,8 +256,7 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
bool invert_move = false;
// If the number of 0xFFFF halfwords is greater than the number of 0x0000
// halfwords, it's more efficient to use move-inverted.
- if (CountClearHalfWords(~imm, reg_size) >
- CountClearHalfWords(imm, reg_size)) {
+ if (CountSetHalfWords(imm, reg_size) > CountSetHalfWords(~imm, reg_size)) {
ignored_halfword = 0xFFFFL;
invert_move = true;
}
@@ -560,23 +559,27 @@ void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
}
}
-unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- DCHECK_EQ(reg_size % 8, 0);
- int count = 0;
- for (unsigned i = 0; i < (reg_size / 16); i++) {
- if ((imm & 0xFFFF) == 0) {
- count++;
- }
- imm >>= 16;
+unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) {
+ DCHECK_EQ(reg_size % 16, 0);
+
+#define HALFWORD(idx) (((imm >> ((idx)*16)) & 0xFFFF) ? 1u : 0u)
+ switch (reg_size / 16) {
+ case 1:
+ return HALFWORD(0);
+ case 2:
+ return HALFWORD(0) + HALFWORD(1);
+ case 4:
+ return HALFWORD(0) + HALFWORD(1) + HALFWORD(2) + HALFWORD(3);
}
- return count;
+#undef HALFWORD
+ UNREACHABLE();
}
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
- return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+ return CountSetHalfWords(imm, reg_size) <= 1;
}
// The movn instruction can generate immediates containing an arbitrary 16-bit
@@ -1690,6 +1693,7 @@ void TurboAssembler::CallCFunction(ExternalReference function,
}
static const int kRegisterPassedArguments = 8;
+static const int kFPRegisterPassedArguments = 8;
void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
int num_of_double_args) {
@@ -1697,17 +1701,6 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
DCHECK(has_frame());
- // If we're passing doubles, we're limited to the following prototypes
- // (defined by ExternalReference::Type):
- // BUILTIN_COMPARE_CALL: int f(double, double)
- // BUILTIN_FP_FP_CALL: double f(double, double)
- // BUILTIN_FP_CALL: double f(double)
- // BUILTIN_FP_INT_CALL: double f(double, int)
- if (num_of_double_args > 0) {
- DCHECK_LE(num_of_reg_args, 1);
- DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
- }
-
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
Register pc_scratch = x4;
@@ -1758,6 +1751,13 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
Drop(claim_slots);
}
+
+ if (num_of_double_args > kFPRegisterPassedArguments) {
+ // Drop the register passed arguments.
+ int claim_slots =
+ RoundUp(num_of_double_args - kFPRegisterPassedArguments, 2);
+ Drop(claim_slots);
+ }
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -2041,9 +2041,9 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// (off-heap) entry point directly without going through the (on-heap)
// trampoline. Otherwise, just call the Code object as always.
- Ldrsw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- Tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- B(ne, &if_code_is_off_heap);
+ Ldr(scratch.W(), FieldMemOperand(code_object, Code::kFlagsOffset));
+ TestAndBranchIfAnySet(scratch.W(), Code::IsOffHeapTrampoline::kMask,
+ &if_code_is_off_heap);
// Not an off-heap trampoline object, the entry point is at
// Code::raw_instruction_start().
@@ -2054,8 +2054,8 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// table.
bind(&if_code_is_off_heap);
Ldrsw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- Lsl(destination, scratch, kSystemPointerSizeLog2);
- Add(destination, destination, kRootRegister);
+ Add(destination, kRootRegister,
+ Operand(scratch, LSL, kSystemPointerSizeLog2));
Ldr(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
@@ -2088,8 +2088,12 @@ void TurboAssembler::LoadCodeDataContainerEntry(
Register destination, Register code_data_container_object) {
ASM_CODE_COMMENT(this);
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Ldr(destination, FieldMemOperand(code_data_container_object,
- CodeDataContainer::kCodeEntryPointOffset));
+
+ LoadExternalPointerField(
+ destination,
+ FieldMemOperand(code_data_container_object,
+ CodeDataContainer::kCodeEntryPointOffset),
+ kCodeEntryPointTag);
}
void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
@@ -2259,8 +2263,10 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
// If the formal parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- B(eq, &regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ Cmp(formal_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ B(eq, &regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -2354,8 +2360,8 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
Unreachable();
}
@@ -2369,7 +2375,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
ASM_CODE_COMMENT(this);
// Load receiver to pass it later to DebugOnFunctionCall hook.
Peek(x4, ReceiverOperand(actual_parameter_count));
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
if (!new_target.is_valid()) new_target = padreg;
@@ -3056,6 +3063,34 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Bind(&done);
}
+void TurboAssembler::LoadExternalPointerField(Register destination,
+ MemOperand field_operand,
+ ExternalPointerTag tag,
+ Register isolate_root) {
+ DCHECK(!AreAliased(destination, isolate_root));
+ ASM_CODE_COMMENT(this);
+#ifdef V8_HEAP_SANDBOX
+ UseScratchRegisterScope temps(this);
+ Register external_table = temps.AcquireX();
+ if (isolate_root == no_reg) {
+ DCHECK(root_array_available_);
+ isolate_root = kRootRegister;
+ }
+ Ldr(external_table,
+ MemOperand(isolate_root,
+ IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ Ldr(destination, field_operand);
+ Ldr(destination,
+ MemOperand(external_table, destination, LSL, kSystemPointerSizeLog2));
+ if (tag != 0) {
+ And(destination, destination, Immediate(~tag));
+ }
+#else
+ Ldr(destination, field_operand);
+#endif // V8_HEAP_SANDBOX
+}
+
void TurboAssembler::MaybeSaveRegisters(RegList registers) {
if (registers == 0) return;
ASM_CODE_COMMENT(this);
@@ -3274,7 +3309,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
Mov(w0, static_cast<int>(reason));
Call(ExternalReference::abort_with_reason());
return;
@@ -3288,7 +3323,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
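
CountSetHalfWords, which replaces CountClearHalfWords above, counts the non-zero 16-bit halfwords of an immediate; movz can materialise a value with at most one such halfword, and the move-inverted path is chosen when ~imm has fewer set halfwords than imm. A standalone C++ re-implementation of the idea and its use (illustrative, not the V8 source):

// Counting non-zero 16-bit halfwords of an immediate, and the movz/movn choice.
#include <cstdint>
#include <iostream>

unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size_bits) {
  unsigned count = 0;
  for (unsigned i = 0; i < reg_size_bits / 16; ++i) {
    if ((imm >> (i * 16)) & 0xFFFF) ++count;  // halfword i is non-zero
  }
  return count;
}

bool IsImmMovz(uint64_t imm, unsigned reg_size_bits) {
  return CountSetHalfWords(imm, reg_size_bits) <= 1;
}

int main() {
  std::cout << IsImmMovz(0x0000123400000000ull, 64) << "\n";  // 1: single halfword set
  std::cout << IsImmMovz(0x1234000056780000ull, 64) << "\n";  // 0: two halfwords set
  // Prefer move-inverted when the inverted value has fewer set halfwords.
  uint64_t imm = 0xFFFFFFFFFFFF1234ull;
  std::cout << (CountSetHalfWords(imm, 64) > CountSetHalfWords(~imm, 64)) << "\n";  // 1
}
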
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 11a5e7eb9a..8f60217d9e 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -559,6 +559,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
+ inline void SmiToInt32(Register smi);
+
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, AbortReason reason);
@@ -643,7 +645,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define a jump/call target and bind a label.
inline void BindJumpOrCallTarget(Label* label);
- static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+ static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size);
CPURegList* TmpList() { return &tmp_list_; }
CPURegList* FPTmpList() { return &fptmp_list_; }
@@ -1432,6 +1434,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I64x2BitMask(Register dst, VRegister src);
void I64x2AllTrue(Register dst, VRegister src);
+ // ---------------------------------------------------------------------------
+ // V8 Heap sandbox support
+
+ // Loads a field containing off-heap pointer and does necessary decoding
+ // if V8 heap sandbox is enabled.
+ void LoadExternalPointerField(Register destination, MemOperand field_operand,
+ ExternalPointerTag tag,
+ Register isolate_root = Register::no_reg());
+
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index ae6c4c9200..29a4212aac 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 92686eff12..e61933b05a 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -68,7 +68,7 @@ void CodeStubAssembler::HandleBreakOnNode() {
BreakOnNode(node_id);
}
-void CodeStubAssembler::Assert(const BranchGenerator& branch,
+void CodeStubAssembler::Dcheck(const BranchGenerator& branch,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -78,7 +78,7 @@ void CodeStubAssembler::Assert(const BranchGenerator& branch,
#endif
}
-void CodeStubAssembler::Assert(const NodeGenerator<BoolT>& condition_body,
+void CodeStubAssembler::Dcheck(const NodeGenerator<BoolT>& condition_body,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -88,7 +88,7 @@ void CodeStubAssembler::Assert(const NodeGenerator<BoolT>& condition_body,
#endif
}
-void CodeStubAssembler::Assert(TNode<Word32T> condition_node,
+void CodeStubAssembler::Dcheck(TNode<Word32T> condition_node,
const char* message, const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
@@ -196,7 +196,7 @@ void CodeStubAssembler::FailAssert(
}
#endif
- AbortCSAAssert(message_node);
+ AbortCSADcheck(message_node);
Unreachable();
}
@@ -315,7 +315,7 @@ bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(
TNode<IntPtrT> CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(
TNode<IntPtrT> value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
- CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
value = Signed(IntPtrSub(value, IntPtrConstant(1)));
for (int i = 1; i <= 16; i *= 2) {
value = Signed(WordOr(value, WordShr(value, IntPtrConstant(i))));
@@ -754,7 +754,7 @@ TNode<Smi> CodeStubAssembler::SmiFromInt32(TNode<Int32T> value) {
}
TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) {
- CSA_ASSERT(this, IntPtrLessThan(ChangeUint32ToWord(value),
+ CSA_DCHECK(this, IntPtrLessThan(ChangeUint32ToWord(value),
IntPtrConstant(Smi::kMaxValue)));
return SmiFromInt32(Signed(value));
}
@@ -1234,8 +1234,9 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TVARIABLE(Object, result);
Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this);
- bool needs_double_alignment = flags & kDoubleAlignment;
- bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation;
+ bool needs_double_alignment = flags & AllocationFlag::kDoubleAlignment;
+ bool allow_large_object_allocation =
+ flags & AllocationFlag::kAllowLargeObjectAllocation;
if (allow_large_object_allocation) {
Label next(this);
@@ -1281,7 +1282,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
- if (flags & kPretenured) {
+ if (flags & AllocationFlag::kPretenured) {
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
@@ -1333,7 +1334,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
- DCHECK_EQ(flags & kDoubleAlignment, 0);
+ DCHECK_EQ(flags & AllocationFlag::kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
@@ -1341,8 +1342,8 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags,
TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) {
#if defined(V8_HOST_ARCH_32_BIT)
- return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
- limit_address);
+ return AllocateRaw(size_in_bytes, flags | AllocationFlag::kDoubleAlignment,
+ top_address, limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
@@ -1351,8 +1352,8 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
// Allocation on 64 bit machine is naturally double aligned
- return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
- limit_address);
+ return AllocateRaw(size_in_bytes, flags & ~AllocationFlag::kDoubleAlignment,
+ top_address, limit_address);
#else
#error Architecture not supported
#endif
@@ -1360,17 +1361,19 @@ TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned(
TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
TNode<IntPtrT> size_in_bytes, AllocationFlags flags) {
- DCHECK(flags == kNone || flags == kDoubleAlignment);
- CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
+ DCHECK(flags == AllocationFlag::kNone ||
+ flags == AllocationFlag::kDoubleAlignment);
+ CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
return Allocate(size_in_bytes, flags);
}
TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
- if (FLAG_single_generation) flags |= kPretenured;
- bool const new_space = !(flags & kPretenured);
- bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
+ if (FLAG_single_generation) flags |= AllocationFlag::kPretenured;
+ bool const new_space = !(flags & AllocationFlag::kPretenured);
+ bool const allow_large_objects =
+ flags & AllocationFlag::kAllowLargeObjectAllocation;
// For optimized allocations, we don't allow the allocation to happen in a
// different generation than requested.
bool const always_allocated_in_requested_space =
@@ -1380,10 +1383,11 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
if (TryToIntPtrConstant(size_in_bytes, &size_constant)) {
CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
} else {
- CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
+ CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes));
}
}
- if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) {
+ if (!(flags & AllocationFlag::kDoubleAlignment) &&
+ always_allocated_in_requested_space) {
return OptimizedAllocate(
size_in_bytes,
new_space ? AllocationType::kYoung : AllocationType::kOld,
@@ -1400,14 +1404,14 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
// kNullAddress.
if (ExternalReference::new_space_allocation_top_address(isolate())
.address() != kNullAddress) {
- Address top_address =
+ Address raw_top_address =
ExternalReference::new_space_allocation_top_address(isolate())
.address();
- Address limit_address =
+ Address raw_limit_address =
ExternalReference::new_space_allocation_limit_address(isolate())
.address();
- CHECK_EQ(kSystemPointerSize, limit_address - top_address);
+ CHECK_EQ(kSystemPointerSize, raw_limit_address - raw_top_address);
}
DCHECK_EQ(kSystemPointerSize,
@@ -1421,7 +1425,7 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
IntPtrAdd(ReinterpretCast<IntPtrT>(top_address),
IntPtrConstant(kSystemPointerSize));
- if (flags & kDoubleAlignment) {
+ if (flags & AllocationFlag::kDoubleAlignment) {
return AllocateRawDoubleAligned(size_in_bytes, flags,
ReinterpretCast<RawPtrT>(top_address),
ReinterpretCast<RawPtrT>(limit_address));
@@ -1434,7 +1438,8 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
AllocationFlags flags) {
- CHECK(flags == kNone || flags == kDoubleAlignment);
+ CHECK(flags == AllocationFlag::kNone ||
+ flags == AllocationFlag::kDoubleAlignment);
DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize);
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
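
The other recurring change is that allocation flags are now written with an explicit AllocationFlag:: scope (kNone becomes AllocationFlag::kNone, kPretenured becomes AllocationFlag::kPretenured, and so on), and the same qualification shows up later for PropertyLocation::kField. The usual motivation for this kind of move is to stop short enumerator names from leaking into the enclosing scope. A generic illustration of a scoped flag enumeration with explicit bitwise operators — the names and values here are made up, not V8's definitions:

    #include <cstdint>

    enum class AllocFlag : std::uint8_t {
      kNone = 0,
      kDoubleAlignment = 1 << 0,
      kPretenured = 1 << 1,
      kAllowLargeObjectAllocation = 1 << 2,
    };

    constexpr AllocFlag operator|(AllocFlag a, AllocFlag b) {
      return static_cast<AllocFlag>(static_cast<std::uint8_t>(a) |
                                    static_cast<std::uint8_t>(b));
    }
    constexpr bool operator&(AllocFlag a, AllocFlag b) {
      return (static_cast<std::uint8_t>(a) & static_cast<std::uint8_t>(b)) != 0;
    }

    // Call sites must spell out the enum, matching the hunks above:
    //   if (flags & AllocFlag::kPretenured) { /* old-space allocation */ }
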
@@ -1678,7 +1683,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
TNode<HeapObject> object) {
- CSA_ASSERT(this, Word32Or(IsHeapNumber(object), IsOddball(object)));
+ CSA_DCHECK(this, Word32Or(IsHeapNumber(object), IsOddball(object)));
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
return LoadObjectField<Float64T>(object, HeapNumber::kValueOffset);
}
@@ -1694,7 +1699,7 @@ TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
TNode<Map> map = LoadObjectField<Map>(object, HeapObject::kMapOffset);
#ifdef V8_MAP_PACKING
// Check the loaded map is unpacked. i.e. the lowest two bits != 0b10
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordNotEqual(WordAnd(BitcastTaggedToWord(map),
IntPtrConstant(Internals::kMapWordXorMask)),
IntPtrConstant(Internals::kMapWordSignature)));
@@ -1732,7 +1737,7 @@ TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(TNode<Map> map) {
Map::Bits1::IsAccessCheckNeededBit::kMask;
USE(mask);
// Interceptors or access checks imply special receiver.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask),
is_special, Int32TrueConstant()));
return is_special;
@@ -1754,7 +1759,7 @@ void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map,
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
TNode<JSReceiver> object) {
- CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
+ CSA_SLOW_DCHECK(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
return Select<HeapObject>(
TaggedIsSmi(properties), [=] { return EmptyFixedArrayConstant(); },
@@ -1763,7 +1768,7 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
TNode<JSReceiver> object) {
- CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+ CSA_SLOW_DCHECK(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
NodeGenerator<HeapObject> make_empty = [=]() -> TNode<HeapObject> {
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
@@ -1775,10 +1780,10 @@ TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
NodeGenerator<HeapObject> cast_properties = [=] {
TNode<HeapObject> dict = CAST(properties);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- CSA_ASSERT(this, Word32Or(IsSwissNameDictionary(dict),
+ CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(dict),
IsGlobalDictionary(dict)));
} else {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(IsNameDictionary(dict), IsGlobalDictionary(dict)));
}
return dict;
@@ -1789,7 +1794,7 @@ TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength(
TNode<Context> context, TNode<JSArgumentsObject> array) {
- CSA_ASSERT(this, IsJSArgumentsObjectWithLength(context, array));
+ CSA_DCHECK(this, IsJSArgumentsObjectWithLength(context, array));
constexpr int offset = JSStrictArgumentsObject::kLengthOffset;
STATIC_ASSERT(offset == JSSloppyArgumentsObject::kLengthOffset);
return LoadObjectField(array, offset);
@@ -1797,19 +1802,19 @@ TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength(
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(TNode<JSArray> array) {
TNode<Number> length = LoadJSArrayLength(array);
- CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
+ CSA_DCHECK(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
IsElementsKindInRange(
LoadElementsKind(array),
FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND,
LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)));
// JSArray length is always a positive Smi for fast arrays.
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
return CAST(length);
}
TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength(
TNode<FixedArrayBase> array) {
- CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array));
+ CSA_SLOW_DCHECK(this, IsNotWeakFixedArraySubclass(array));
return LoadObjectField<Smi>(array, FixedArrayBase::kLengthOffset);
}
@@ -1889,7 +1894,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(TNode<Map> map) {
TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
TNode<Map> map) {
// See Map::GetInObjectPropertiesStartInWords() for details.
- CSA_ASSERT(this, IsJSObjectMap(map));
+ CSA_DCHECK(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
@@ -1897,7 +1902,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
TNode<Map> map) {
// See Map::GetConstructorFunctionIndex() for details.
- CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
+ CSA_DCHECK(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>(
map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset));
}
@@ -2020,7 +2025,7 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
TNode<Uint32T> CodeStubAssembler::LoadNameHashAssumeComputed(TNode<Name> name) {
TNode<Uint32T> hash_field = LoadNameRawHashField(name);
- CSA_ASSERT(this, IsClearWord32(hash_field, Name::kHashNotComputedMask));
+ CSA_DCHECK(this, IsClearWord32(hash_field, Name::kHashNotComputedMask));
return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift)));
}
@@ -2076,10 +2081,10 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Goto(if_strong);
}
-void CodeStubAssembler::AssertHasValidMap(TNode<HeapObject> object) {
+void CodeStubAssembler::DcheckHasValidMap(TNode<HeapObject> object) {
#ifdef V8_MAP_PACKING
// Test if the map is an unpacked and valid map
- CSA_ASSERT(this, IsMap(LoadMap(object)));
+ CSA_DCHECK(this, IsMap(LoadMap(object)));
#endif
}
@@ -2110,8 +2115,8 @@ TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) {
TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
TNode<MaybeObject> value) {
- CSA_ASSERT(this, IsWeakOrCleared(value));
- CSA_ASSERT(this, IsNotCleared(value));
+ CSA_DCHECK(this, IsWeakOrCleared(value));
+ CSA_DCHECK(this, IsNotCleared(value));
return UncheckedCast<HeapObject>(BitcastWordToTagged(WordAnd(
BitcastMaybeObjectToWord(value), IntPtrConstant(~kWeakHeapObjectMask))));
}
@@ -2128,7 +2133,7 @@ TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak(
// but requires a big constant for ~mask.
TNode<BoolT> CodeStubAssembler::IsWeakReferenceToObject(
TNode<MaybeObject> maybe_object, TNode<Object> value) {
- CSA_ASSERT(this, TaggedIsNotSmi(maybe_object));
+ CSA_DCHECK(this, TaggedIsNotSmi(maybe_object));
if (COMPRESS_POINTERS_BOOL) {
return Word32Equal(
Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)),
@@ -2202,13 +2207,13 @@ TNode<TValue> CodeStubAssembler::LoadArrayElement(TNode<Array> array,
std::is_same<TIndex, UintPtrT>::value ||
std::is_same<TIndex, IntPtrT>::value,
"Only Smi, UintPtrT or IntPtrT indices are allowed");
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(ParameterToIntPtr(index_node),
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(ParameterToIntPtr(index_node),
IntPtrConstant(0)));
DCHECK(IsAligned(additional_offset, kTaggedSize));
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, header_size);
- CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
+ CSA_DCHECK(this, IsOffsetInBounds(offset, LoadArrayLength(array),
array_header_size));
constexpr MachineType machine_type = MachineTypeOf<TValue>::value;
return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
@@ -2227,8 +2232,8 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
std::is_same<TIndex, UintPtrT>::value ||
std::is_same<TIndex, IntPtrT>::value,
"Only Smi, UintPtrT or IntPtrT indexes are allowed");
- CSA_ASSERT(this, IsFixedArraySubclass(object));
- CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
+ CSA_DCHECK(this, IsFixedArraySubclass(object));
+ CSA_DCHECK(this, IsNotWeakFixedArraySubclass(object));
if (NeedsBoundsCheck(check_bounds)) {
FixedArrayBoundsCheck(object, index, additional_offset);
@@ -2591,7 +2596,7 @@ TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size);
- CSA_SLOW_ASSERT(
+ CSA_SLOW_DCHECK(
this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize));
return Load<MaybeObject>(feedback_vector, offset);
@@ -2620,7 +2625,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
endian_correction;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, HOLEY_ELEMENTS, header_size);
- CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object),
+ CSA_DCHECK(this, IsOffsetInBounds(offset, LoadArrayLength(object),
array_header_size + endian_correction));
if (SmiValuesAre32Bits()) {
return Load<Int32T>(object, offset);
@@ -2631,7 +2636,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset) {
- CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
+ CSA_SLOW_DCHECK(this, IsFixedArraySubclass(object));
return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize,
index, additional_offset);
}
@@ -2648,7 +2653,7 @@ TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, HOLEY_DOUBLE_ELEMENTS, header_size);
- CSA_ASSERT(this, IsOffsetInBounds(
+ CSA_DCHECK(this, IsOffsetInBounds(
offset, LoadAndUntagFixedArrayBaseLength(object),
FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS));
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
@@ -2710,7 +2715,7 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
BIND(&if_dictionary);
{
- CSA_ASSERT(this, IsDictionaryElementsKind(elements_kind));
+ CSA_DCHECK(this, IsDictionaryElementsKind(elements_kind));
var_result = BasicLoadNumberDictionaryElement(CAST(elements), index,
if_accessor, if_hole);
Goto(&done);
@@ -2787,7 +2792,7 @@ TNode<Context> CodeStubAssembler::LoadModuleContext(TNode<Context> context) {
Goto(&context_search);
BIND(&context_search);
{
- CSA_ASSERT(this, Word32BinaryNot(
+ CSA_DCHECK(this, Word32BinaryNot(
TaggedEqual(cur_context.value(), native_context)));
GotoIf(TaggedEqual(LoadMap(CAST(cur_context.value())), module_map),
&context_found);
@@ -2836,7 +2841,7 @@ TNode<Map> CodeStubAssembler::LoadSlowObjectWithNullPrototypeMap(
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
TNode<Int32T> kind, TNode<NativeContext> native_context) {
- CSA_ASSERT(this, IsFastElementsKind(kind));
+ CSA_DCHECK(this, IsFastElementsKind(kind));
TNode<IntPtrT> offset =
IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
ChangeInt32ToIntPtr(kind));
@@ -2897,8 +2902,8 @@ void CodeStubAssembler::GotoIfPrototypeRequiresRuntimeLookup(
TNode<HeapObject> CodeStubAssembler::LoadJSFunctionPrototype(
TNode<JSFunction> function, Label* if_bailout) {
- CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
- CSA_ASSERT(this, IsClearWord32<Map::Bits1::HasNonInstancePrototypeBit>(
+ CSA_DCHECK(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
+ CSA_DCHECK(this, IsClearWord32<Map::Bits1::HasNonInstancePrototypeBit>(
LoadMapBitField(LoadMap(function))));
TNode<HeapObject> proto_or_map = LoadObjectField<HeapObject>(
function, JSFunction::kPrototypeOrInitialMapOffset);
@@ -2929,7 +2934,7 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
&check_for_interpreter_data);
{
TNode<Code> code = FromCodeT(CAST(var_result.value()));
- CSA_ASSERT(
+ CSA_DCHECK(
this, Word32Equal(DecodeWord32<Code::KindField>(LoadObjectField<Int32T>(
code, Code::kFlagsOffset)),
Int32Constant(static_cast<int>(CodeKind::BASELINE))));
@@ -3001,7 +3006,7 @@ void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
void CodeStubAssembler::StoreMap(TNode<HeapObject> object, TNode<Map> map) {
OptimizedStoreMap(object, map);
- AssertHasValidMap(object);
+ DcheckHasValidMap(object);
}
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
@@ -3012,7 +3017,7 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
TNode<Map> map) {
OptimizedStoreMap(object, map);
- AssertHasValidMap(object);
+ DcheckHasValidMap(object);
}
void CodeStubAssembler::StoreObjectFieldRoot(TNode<HeapObject> object,
@@ -3053,7 +3058,7 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
static_cast<int>(PropertyArray::kLengthAndHashOffset));
// Check that index_node + additional_offset <= object.length.
// TODO(cbruni): Use proper LoadXXLength helpers
- CSA_ASSERT(
+ CSA_DCHECK(
this,
IsOffsetInBounds(
offset,
@@ -3134,7 +3139,7 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(
TNode<IntPtrT> offset =
ElementOffsetFromIndex(Signed(slot), HOLEY_ELEMENTS, header_size);
// Check that slot <= feedback_vector.length.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector),
FeedbackVector::kHeaderSize),
SmiFromIntPtr(offset), feedback_vector);
@@ -3275,7 +3280,7 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
TNode<Cell> CodeStubAssembler::AllocateCellWithValue(TNode<Object> value,
WriteBarrierMode mode) {
- TNode<HeapObject> result = Allocate(Cell::kSize, kNone);
+ TNode<HeapObject> result = Allocate(Cell::kSize, AllocationFlag::kNone);
StoreMapNoWriteBarrier(result, RootIndex::kCellMap);
TNode<Cell> cell = CAST(result);
StoreCellValue(cell, value, mode);
@@ -3298,7 +3303,7 @@ void CodeStubAssembler::StoreCellValue(TNode<Cell> cell, TNode<Object> value,
}
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber() {
- TNode<HeapObject> result = Allocate(HeapNumber::kSize, kNone);
+ TNode<HeapObject> result = Allocate(HeapNumber::kSize, AllocationFlag::kNone);
RootIndex heap_map_index = RootIndex::kHeapNumberMap;
StoreMapNoWriteBarrier(result, heap_map_index);
return UncheckedCast<HeapNumber>(result);
@@ -3343,7 +3348,8 @@ TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
TNode<IntPtrT> size =
IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
Signed(WordShl(length, kSystemPointerSizeLog2)));
- TNode<HeapObject> raw_result = Allocate(size, kAllowLargeObjectAllocation);
+ TNode<HeapObject> raw_result =
+ Allocate(size, AllocationFlag::kAllowLargeObjectAllocation);
StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
@@ -3403,7 +3409,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
TNode<ByteArray> CodeStubAssembler::AllocateNonEmptyByteArray(
TNode<UintPtrT> length, AllocationFlags flags) {
- CSA_ASSERT(this, WordNotEqual(length, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(length, IntPtrConstant(0)));
Comment("AllocateNonEmptyByteArray");
TVARIABLE(Object, var_result);
@@ -3551,7 +3557,7 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
TNode<IntPtrT> at_least_space_for, AllocationFlags flags) {
- CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ CSA_DCHECK(this, UintPtrLessThanOrEqual(
at_least_space_for,
IntPtrConstant(NameDictionary::kMaxCapacity)));
TNode<IntPtrT> capacity = HashTableComputeCapacity(at_least_space_for);
@@ -3560,8 +3566,8 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
TNode<IntPtrT> capacity, AllocationFlags flags) {
- CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
- CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordIsPowerOfTwo(capacity));
+ CSA_DCHECK(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> length = EntryToIndex<NameDictionary>(capacity);
TNode<IntPtrT> store_size = IntPtrAdd(
TimesTaggedSize(length), IntPtrConstant(NameDictionary::kHeaderSize));
@@ -3619,7 +3625,7 @@ TNode<NameDictionary> CodeStubAssembler::CopyNameDictionary(
TNode<NameDictionary> dictionary, Label* large_object_fallback) {
Comment("Copy boilerplate property dict");
TNode<IntPtrT> capacity = SmiUntag(GetCapacity<NameDictionary>(dictionary));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(capacity, IntPtrConstant(0)));
GotoIf(UintPtrGreaterThan(
capacity, IntPtrConstant(NameDictionary::kMaxRegularCapacity)),
large_object_fallback);
@@ -3643,11 +3649,11 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTable(
template <typename CollectionType>
TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTableWithCapacity(
TNode<IntPtrT> capacity) {
- CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
- CSA_ASSERT(this,
+ CSA_DCHECK(this, WordIsPowerOfTwo(capacity));
+ CSA_DCHECK(this,
IntPtrGreaterThanOrEqual(
capacity, IntPtrConstant(CollectionType::kInitialCapacity)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(
capacity, IntPtrConstant(CollectionType::MaxCapacity())));
@@ -3665,9 +3671,9 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTableWithCapacity(
const ElementsKind elements_kind = HOLEY_ELEMENTS;
TNode<Map> fixed_array_map =
HeapConstant(CollectionType::GetMap(ReadOnlyRoots(isolate())));
- TNode<CollectionType> table =
- CAST(AllocateFixedArray(elements_kind, fixed_array_length,
- kAllowLargeObjectAllocation, fixed_array_map));
+ TNode<CollectionType> table = CAST(AllocateFixedArray(
+ elements_kind, fixed_array_length,
+ AllocationFlag::kAllowLargeObjectAllocation, fixed_array_map));
Comment("Initialize the OrderedHashTable fields.");
const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
@@ -3742,8 +3748,8 @@ TNode<CollectionType> CodeStubAssembler::AllocateOrderedHashTableWithCapacity(
TimesTaggedSize(IntPtrMul(
capacity, IntPtrConstant(CollectionType::kEntrySize))));
- CSA_ASSERT(this, IntPtrEqual(ptr_diff, TimesTaggedSize(array_data_fields)));
- CSA_ASSERT(this, IntPtrEqual(expected_end, data_end_address));
+ CSA_DCHECK(this, IntPtrEqual(ptr_diff, TimesTaggedSize(array_data_fields)));
+ CSA_DCHECK(this, IntPtrEqual(expected_end, data_end_address));
#endif
}
@@ -3779,8 +3785,8 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
TNode<Map> map, base::Optional<TNode<HeapObject>> properties,
base::Optional<TNode<FixedArray>> elements, AllocationFlags flags,
SlackTrackingMode slack_tracking_mode) {
- CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
- CSA_ASSERT(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
+ CSA_DCHECK(this, Word32BinaryNot(IsJSFunctionMap(map)));
+ CSA_DCHECK(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
JS_GLOBAL_OBJECT_TYPE)));
TNode<IntPtrT> instance_size =
TimesTaggedSize(LoadMapInstanceSizeInWords(map));
@@ -3799,11 +3805,11 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocatedJSObjectFromMap.
if (!properties) {
- CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
+ CSA_DCHECK(this, Word32BinaryNot(IsDictionaryMap((map))));
StoreObjectFieldRoot(object, JSObject::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
} else {
- CSA_ASSERT(this, Word32Or(Word32Or(Word32Or(IsPropertyArray(*properties),
+ CSA_DCHECK(this, Word32Or(Word32Or(Word32Or(IsPropertyArray(*properties),
IsNameDictionary(*properties)),
IsSwissNameDictionary(*properties)),
IsEmptyFixedArray(*properties)));
@@ -3829,7 +3835,7 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
TNode<HeapObject> object, TNode<Map> map, TNode<IntPtrT> instance_size,
int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
- CSA_ASSERT(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
+ CSA_DCHECK(this, IsClearWord32<Map::Bits3::ConstructionCounterBits>(
LoadMapBitField3(map)));
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size,
RootIndex::kUndefinedValue);
@@ -3854,7 +3860,7 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
{
Comment("Decrease construction counter");
// Slack tracking is only done on initial maps.
- CSA_ASSERT(this, IsUndefined(LoadMapBackPointer(map)));
+ CSA_DCHECK(this, IsUndefined(LoadMapBackPointer(map)));
STATIC_ASSERT(Map::Bits3::ConstructionCounterBits::kLastUsedBit == 31);
TNode<Word32T> new_bit_field3 = Int32Sub(
bit_field3,
@@ -3899,8 +3905,8 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
TNode<IntPtrT> end_address,
TNode<Object> value) {
Comment("StoreFieldsNoWriteBarrier");
- CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize));
- CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize));
+ CSA_DCHECK(this, WordIsAligned(start_address, kTaggedSize));
+ CSA_DCHECK(this, WordIsAligned(end_address, kTaggedSize));
BuildFastLoop<IntPtrT>(
start_address, end_address,
[=](TNode<IntPtrT> current) {
@@ -3911,7 +3917,7 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
}
void CodeStubAssembler::MakeFixedArrayCOW(TNode<FixedArray> array) {
- CSA_ASSERT(this, IsFixedArrayMap(LoadMap(array)));
+ CSA_DCHECK(this, IsFixedArrayMap(LoadMap(array)));
Label done(this);
// The empty fixed array is not modifiable anyway. And we shouldn't change its
// Map.
@@ -3932,7 +3938,7 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
base::Optional<TNode<AllocationSite>> allocation_site,
int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
int base_size = array_header_size;
if (allocation_site) {
@@ -3968,8 +3974,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TNode<IntPtrT> capacity, AllocationFlags allocation_flags,
int array_header_size) {
Comment("begin allocation of JSArray with elements");
- CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CHECK_EQ(allocation_flags & ~AllocationFlag::kAllowLargeObjectAllocation, 0);
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
TVARIABLE(JSArray, array);
TVARIABLE(FixedArrayBase, elements);
@@ -4018,7 +4024,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
// folding trick. Instead, we first allocate the elements in large object
// space, and then allocate the JSArray (and possibly the allocation
// memento) in new space.
- if (allocation_flags & kAllowLargeObjectAllocation) {
+ if (allocation_flags & AllocationFlag::kAllowLargeObjectAllocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size), &next);
@@ -4060,7 +4066,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
DCHECK(RootsTable::IsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements.value(), elements_map_index);
- CSA_ASSERT(this, WordNotEqual(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(capacity, IntPtrConstant(0)));
TNode<Smi> capacity_smi = SmiTag(capacity);
StoreObjectFieldNoWriteBarrier(elements.value(), FixedArray::kLengthOffset,
capacity_smi);
@@ -4075,7 +4081,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
base::Optional<TNode<AllocationSite>> allocation_site,
TNode<IntPtrT> size_in_bytes) {
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
// Allocate space for the JSArray and the elements FixedArray in one go.
TNode<HeapObject> array = AllocateInNewSpace(size_in_bytes);
@@ -4098,7 +4104,7 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
AllocationFlags allocation_flags) {
- CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length));
TNode<JSArray> array;
TNode<FixedArrayBase> elements;
@@ -4205,7 +4211,7 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
BIND(&allocate_jsarray);
// Handle any nonextensible elements kinds
- CSA_ASSERT(this, IsElementsKindLessThanOrEqual(
+ CSA_DCHECK(this, IsElementsKindLessThanOrEqual(
var_elements_kind.value(),
LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND));
GotoIf(IsElementsKindLessThanOrEqual(var_elements_kind.value(),
@@ -4233,7 +4239,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT capacity is allowed");
Comment("AllocateFixedArray");
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrOrSmiGreaterThan(capacity, IntPtrOrSmiConstant<TIndex>(0)));
const intptr_t kMaxLength = IsDoubleElementsKind(kind)
@@ -4258,7 +4264,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
TNode<IntPtrT> total_size = GetFixedArrayAllocationSize(capacity, kind);
- if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment;
+ if (IsDoubleElementsKind(kind)) flags |= AllocationFlag::kDoubleAlignment;
// Allocate both array and elements object, and initialize the JSArray.
TNode<HeapObject> array = Allocate(total_size, flags);
if (fixed_array_map) {
@@ -4268,7 +4274,7 @@ TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
// need the write barrier even in LOS, but it's better to not take chances
// in case this invariant changes later, since it's difficult to enforce
// locally here.
- if (flags == CodeStubAssembler::kNone) {
+ if (flags == AllocationFlag::kNone) {
StoreMapNoWriteBarrier(array, *fixed_array_map);
} else {
StoreMap(array, *fixed_array_map);
@@ -4304,9 +4310,9 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
"Only Smi or IntPtrT first, count, and capacity are allowed");
DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrOrSmiNotEqual(IntPtrOrSmiConstant<TIndex>(0), capacity));
- CSA_ASSERT(this, TaggedEqual(source_map, LoadMap(source)));
+ CSA_DCHECK(this, TaggedEqual(source_map, LoadMap(source)));
TVARIABLE(FixedArrayBase, var_result);
TVARIABLE(Map, var_target_map, source_map);
@@ -4318,11 +4324,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
// we can't just use COW, use FixedArrayMap as the target map. Otherwise, use
// source_map as the target map.
if (IsDoubleElementsKind(from_kind)) {
- CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map));
+ CSA_DCHECK(this, IsFixedDoubleArrayMap(source_map));
var_target_map = FixedArrayMapConstant();
Goto(&new_space_check);
} else {
- CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedDoubleArrayMap(source_map)));
Branch(TaggedEqual(var_target_map.value(), FixedCOWArrayMapConstant()),
&is_cow, &new_space_check);
@@ -4376,7 +4382,7 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
TNode<IntPtrT> object_page = PageFromAddress(object_word);
TNode<IntPtrT> page_flags =
Load<IntPtrT>(object_page, IntPtrConstant(Page::kFlagsOffset));
- CSA_ASSERT(
+ CSA_DCHECK(
this,
WordNotEqual(
WordAnd(page_flags,
@@ -4462,7 +4468,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
"Only Smi or IntPtrT first, count, and capacity are allowed");
DCHECK_NE(var_holes_converted, nullptr);
- CSA_ASSERT(this, IsFixedDoubleArrayMap(fixed_array_map));
+ CSA_DCHECK(this, IsFixedDoubleArrayMap(fixed_array_map));
TVARIABLE(FixedArrayBase, var_result);
const ElementsKind kind = PACKED_DOUBLE_ELEMENTS;
@@ -4475,7 +4481,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles(
// The construction of the loop and the offsets for double elements is
// extracted from CopyFixedArrayElements.
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(from_array, kind));
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
Comment("[ ExtractFixedDoubleArrayFillingHoles");
@@ -4556,7 +4562,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
var_holes_converted != nullptr ? HoleConversionMode::kConvertToUndefined
: HoleConversionMode::kDontConvert;
TVARIABLE(FixedArrayBase, var_result);
- auto allocation_flags = CodeStubAssembler::kAllowLargeObjectAllocation;
+ auto allocation_flags = AllocationFlag::kAllowLargeObjectAllocation;
if (!first) {
first = IntPtrOrSmiConstant<TIndex>(0);
}
@@ -4564,13 +4570,13 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
count = IntPtrOrSmiSub(
TaggedToParameter<TIndex>(LoadFixedArrayBaseLength(source)), *first);
- CSA_ASSERT(this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant<TIndex>(0),
+ CSA_DCHECK(this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant<TIndex>(0),
*count));
}
if (!capacity) {
capacity = *count;
} else {
- CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
+ CSA_DCHECK(this, Word32BinaryNot(IntPtrOrSmiGreaterThan(
IntPtrOrSmiAdd(*first, *count), *capacity)));
}
@@ -4582,7 +4588,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) {
GotoIf(IsFixedDoubleArrayMap(source_map), &if_fixed_double_array);
} else {
- CSA_ASSERT(this, IsFixedDoubleArrayMap(source_map));
+ CSA_DCHECK(this, IsFixedDoubleArrayMap(source_map));
}
}
@@ -4649,8 +4655,8 @@ CodeStubAssembler::ExtractFixedArray<IntPtrT>(
void CodeStubAssembler::InitializePropertyArrayLength(
TNode<PropertyArray> property_array, TNode<IntPtrT> length) {
- CSA_ASSERT(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this, IntPtrGreaterThan(length, IntPtrConstant(0)));
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(
length, IntPtrConstant(PropertyArray::LengthField::kMax)));
StoreObjectFieldNoWriteBarrier(
@@ -4659,10 +4665,10 @@ void CodeStubAssembler::InitializePropertyArrayLength(
TNode<PropertyArray> CodeStubAssembler::AllocatePropertyArray(
TNode<IntPtrT> capacity) {
- CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> total_size = GetPropertyArrayAllocationSize(capacity);
- TNode<HeapObject> array = Allocate(total_size, kNone);
+ TNode<HeapObject> array = Allocate(total_size, AllocationFlag::kNone);
RootIndex map_index = RootIndex::kPropertyArrayMap;
DCHECK(RootsTable::IsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
@@ -4693,7 +4699,7 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
static_assert(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT from and to are allowed");
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKind(array, kind));
DCHECK(value_root_index == RootIndex::kTheHoleValue ||
value_root_index == RootIndex::kUndefinedValue);
@@ -4752,7 +4758,7 @@ void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
TNode<IntPtrT> index) {
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, PACKED_DOUBLE_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag);
- CSA_ASSERT(this, IsOffsetInBounds(
+ CSA_DCHECK(this, IsOffsetInBounds(
offset, LoadAndUntagFixedArrayBaseLength(array),
FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS));
StoreDoubleHole(array, offset);
@@ -4760,10 +4766,10 @@ void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode<FixedDoubleArray> array,
void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
TNode<IntPtrT> length) {
- CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
+ CSA_DCHECK(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
TNode<IntPtrT> byte_length = TimesTaggedSize(length);
- CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+ CSA_DCHECK(this, UintPtrLessThan(length, byte_length));
static const int32_t fa_base_data_offset =
FixedArray::kHeaderSize - kHeapObjectTag;
@@ -4782,10 +4788,10 @@ void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
void CodeStubAssembler::FillFixedDoubleArrayWithZero(
TNode<FixedDoubleArray> array, TNode<IntPtrT> length) {
- CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
+ CSA_DCHECK(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
TNode<IntPtrT> byte_length = TimesDoubleSize(length);
- CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+ CSA_DCHECK(this, UintPtrLessThan(length, byte_length));
static const int32_t fa_base_data_offset =
FixedDoubleArray::kHeaderSize - kHeapObjectTag;
@@ -4832,11 +4838,11 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
- CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind));
- CSA_ASSERT(this,
+ CSA_DCHECK(this, IsFixedArrayWithKind(elements, kind));
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(IntPtrAdd(dst_index, length),
LoadAndUntagFixedArrayBaseLength(elements)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
IntPtrLessThanOrEqual(IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(elements)));
@@ -4921,15 +4927,15 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
- CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind));
- CSA_ASSERT(this, IsFixedArrayWithKind(src_elements, kind));
- CSA_ASSERT(this, IntPtrLessThanOrEqual(
+ CSA_DCHECK(this, IsFixedArrayWithKind(dst_elements, kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(src_elements, kind));
+ CSA_DCHECK(this, IntPtrLessThanOrEqual(
IntPtrAdd(dst_index, length),
LoadAndUntagFixedArrayBaseLength(dst_elements)));
- CSA_ASSERT(this, IntPtrLessThanOrEqual(
+ CSA_DCHECK(this, IntPtrLessThanOrEqual(
IntPtrAdd(src_index, length),
LoadAndUntagFixedArrayBaseLength(src_elements)));
- CSA_ASSERT(this, Word32Or(TaggedNotEqual(dst_elements, src_elements),
+ CSA_DCHECK(this, Word32Or(TaggedNotEqual(dst_elements, src_elements),
IntPtrEqual(length, IntPtrConstant(0))));
// The write barrier can be ignored if {dst_elements} is in new space, or if
@@ -4997,8 +5003,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
HoleConversionMode convert_holes, TVariable<BoolT>* var_holes_converted) {
DCHECK_IMPLIES(var_holes_converted != nullptr,
convert_holes == HoleConversionMode::kConvertToUndefined);
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind));
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(to_array, to_kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(to_array, to_kind));
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
static_assert(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
@@ -5185,7 +5191,7 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
TNode<IntPtrT> property_count,
WriteBarrierMode barrier_mode,
DestroySource destroy_source) {
- CSA_SLOW_ASSERT(this, Word32Or(IsPropertyArray(from_array),
+ CSA_SLOW_DCHECK(this, Word32Or(IsPropertyArray(from_array),
IsEmptyFixedArray(from_array)));
Comment("[ CopyPropertyArrayValues");
@@ -5243,7 +5249,7 @@ template <>
TNode<Object> CodeStubAssembler::LoadElementAndPrepareForStore(
TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole) {
- CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(array, from_kind));
DCHECK(!IsDoubleElementsKind(to_kind));
if (IsDoubleElementsKind(from_kind)) {
TNode<Float64T> value =
@@ -5262,7 +5268,7 @@ template <>
TNode<Float64T> CodeStubAssembler::LoadElementAndPrepareForStore(
TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole) {
- CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(array, from_kind));
DCHECK(IsDoubleElementsKind(to_kind));
if (IsDoubleElementsKind(from_kind)) {
return LoadDoubleWithHoleCheck(array, offset, if_hole,
@@ -5301,7 +5307,7 @@ template V8_EXPORT_PRIVATE TNode<Smi>
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
TNode<Smi> key, Label* bailout) {
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(elements, kind));
TNode<Smi> capacity = LoadFixedArrayBaseLength(elements);
return TryGrowElementsCapacity(object, elements, kind,
@@ -5317,7 +5323,7 @@ TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT key and capacity nodes are allowed");
Comment("TryGrowElementsCapacity");
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(elements, kind));
// If the gap growth is too big, fall back to the runtime.
TNode<TIndex> max_gap = IntPtrOrSmiConstant<TIndex>(JSObject::kMaxGap);
@@ -5341,7 +5347,7 @@ TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
"Only Smi or IntPtrT capacities are allowed");
Comment("[ GrowElementsCapacity");
- CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, from_kind));
+ CSA_SLOW_DCHECK(this, IsFixedArrayWithKindOrEmpty(elements, from_kind));
// If size of the allocation for the new capacity doesn't fit in a page
// that we can bump-pointer allocate from, fall back to the runtime.
@@ -5539,7 +5545,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
// We do not require an Or with earlier feedback here because once we
// convert the value to a Numeric, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback->value(),
+ CSA_DCHECK(this, SmiEqual(var_feedback->value(),
SmiConstant(BinaryOperationFeedback::kNone)));
}
GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball);
@@ -5917,11 +5923,11 @@ TNode<WordT> CodeStubAssembler::TimesDoubleSize(TNode<WordT> value) {
}
TNode<Object> CodeStubAssembler::ToThisValue(TNode<Context> context,
- TNode<Object> value,
+ TNode<Object> input_value,
PrimitiveType primitive_type,
char const* method_name) {
// We might need to loop once due to JSPrimitiveWrapper unboxing.
- TVARIABLE(Object, var_value, value);
+ TVARIABLE(Object, var_value, input_value);
Label loop(this, &var_value), done_loop(this),
done_throw(this, Label::kDeferred);
Goto(&loop);
@@ -6311,7 +6317,7 @@ TNode<BoolT> CodeStubAssembler::IsStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringEncodingMask)),
Int32Constant(kOneByteStringTag));
@@ -6319,7 +6325,7 @@ TNode<BoolT> CodeStubAssembler::IsOneByteStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
Int32Constant(kSeqStringTag));
@@ -6327,7 +6333,7 @@ TNode<BoolT> CodeStubAssembler::IsSequentialStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type,
Int32Constant(kStringRepresentationMask | kStringEncodingMask)),
@@ -6336,7 +6342,7 @@ TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
Int32Constant(kConsStringTag));
@@ -6344,7 +6350,7 @@ TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsIndirectStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
STATIC_ASSERT(kIsIndirectStringMask == 0x1);
STATIC_ASSERT(kIsIndirectStringTag == 0x1);
return UncheckedCast<BoolT>(
@@ -6353,7 +6359,7 @@ TNode<BoolT> CodeStubAssembler::IsIndirectStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsExternalStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
Int32Constant(kExternalStringTag));
@@ -6361,7 +6367,7 @@ TNode<BoolT> CodeStubAssembler::IsExternalStringInstanceType(
TNode<BoolT> CodeStubAssembler::IsUncachedExternalStringInstanceType(
TNode<Int32T> instance_type) {
- CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ CSA_DCHECK(this, IsStringInstanceType(instance_type));
STATIC_ASSERT(kUncachedExternalStringTag != 0);
return IsSetWord32(instance_type, kUncachedExternalStringMask);
}
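
The string predicates above all follow the same shape: mask the instance type with a representation or encoding mask and compare the result against a tag. A generic illustration of that classification style — with invented mask and tag constants, not V8's real instance-type layout:

    #include <cstdint>

    // Invented layout: bit 3 selects the encoding, bits 0..2 the representation.
    constexpr std::uint32_t kEncodingMask       = 1u << 3;
    constexpr std::uint32_t kOneByteTag         = 1u << 3;
    constexpr std::uint32_t kRepresentationMask = 0x7u;
    constexpr std::uint32_t kSeqTag             = 0x0u;

    constexpr bool IsOneByte(std::uint32_t instance_type) {
      return (instance_type & kEncodingMask) == kOneByteTag;
    }
    constexpr bool IsSequential(std::uint32_t instance_type) {
      return (instance_type & kRepresentationMask) == kSeqTag;
    }
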
@@ -6653,7 +6659,7 @@ TNode<BoolT> CodeStubAssembler::IsUniqueNameNoIndex(TNode<HeapObject> object) {
// Semantics: {object} is a Symbol, or a String that doesn't have a cached
// index. This returns {true} for strings containing representations of
// integers in the range above 9999999 (per kMaxCachedArrayIndexLength)
-// and below MAX_SAFE_INTEGER. For CSA_ASSERTs ensuring correct usage, this is
+// and below MAX_SAFE_INTEGER. For CSA_DCHECKs ensuring correct usage, this is
// better than no checking; and we don't have a good/fast way to accurately
// check such strings for being within "array index" (uint32_t) range.
TNode<BoolT> CodeStubAssembler::IsUniqueNameNoCachedIndex(
@@ -6913,7 +6919,7 @@ TNode<BoolT> CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(
TNode<Uint16T> CodeStubAssembler::StringCharCodeAt(TNode<String> string,
TNode<UintPtrT> index) {
- CSA_ASSERT(this, UintPtrLessThan(index, LoadStringLengthAsWord(string)));
+ CSA_DCHECK(this, UintPtrLessThan(index, LoadStringLengthAsWord(string)));
TVARIABLE(Uint16T, var_result);
@@ -7305,7 +7311,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
TNode<Context> context, TNode<HeapObject> input, Object::Conversion mode,
BigIntHandling bigint_handling) {
- CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
+ CSA_DCHECK(this, Word32BinaryNot(IsHeapNumber(input)));
TVARIABLE(HeapObject, var_input, input);
TVARIABLE(Numeric, var_result);
@@ -7347,7 +7353,7 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
// Number/Numeric.
var_input = CAST(result);
// We have a new input. Redo the check and reload instance_type.
- CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(var_input.value())));
+ CSA_DCHECK(this, Word32BinaryNot(IsHeapNumber(var_input.value())));
instance_type = LoadInstanceType(var_input.value());
Goto(&if_inputisnotreceiver);
}
@@ -7406,7 +7412,7 @@ TNode<Numeric> CodeStubAssembler::NonNumberToNumberOrNumeric(
BIND(&end);
if (mode == Object::Conversion::kToNumber) {
- CSA_ASSERT(this, IsNumber(var_result.value()));
+ CSA_DCHECK(this, IsNumber(var_result.value()));
}
return var_result.value();
}
@@ -7420,7 +7426,7 @@ TNode<Number> CodeStubAssembler::NonNumberToNumber(
void CodeStubAssembler::TryPlainPrimitiveNonNumberToNumber(
TNode<HeapObject> input, TVariable<Number>* var_result, Label* if_bailout) {
- CSA_ASSERT(this, Word32BinaryNot(IsHeapNumber(input)));
+ CSA_DCHECK(this, Word32BinaryNot(IsHeapNumber(input)));
Label done(this);
// Dispatch on the {input} instance type.
@@ -7839,11 +7845,11 @@ TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
bool starts_as_zero) {
DCHECK_EQ((mask >> shift) << shift, mask);
// Ensure the {value} fits fully in the mask.
- CSA_ASSERT(this, Uint32LessThanOrEqual(value, Uint32Constant(mask >> shift)));
+ CSA_DCHECK(this, Uint32LessThanOrEqual(value, Uint32Constant(mask >> shift)));
TNode<Word32T> encoded_value = Word32Shl(value, Int32Constant(shift));
TNode<Word32T> masked_word;
if (starts_as_zero) {
- CSA_ASSERT(this, Word32Equal(Word32And(word, Int32Constant(~mask)), word));
+ CSA_DCHECK(this, Word32Equal(Word32And(word, Int32Constant(~mask)), word));
masked_word = word;
} else {
masked_word = Word32And(word, Int32Constant(~mask));
@@ -7857,12 +7863,12 @@ TNode<WordT> CodeStubAssembler::UpdateWord(TNode<WordT> word,
bool starts_as_zero) {
DCHECK_EQ((mask >> shift) << shift, mask);
// Ensure the {value} fits fully in the mask.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
UintPtrLessThanOrEqual(value, UintPtrConstant(mask >> shift)));
TNode<WordT> encoded_value = WordShl(value, static_cast<int>(shift));
TNode<WordT> masked_word;
if (starts_as_zero) {
- CSA_ASSERT(this, WordEqual(WordAnd(word, UintPtrConstant(~mask)), word));
+ CSA_DCHECK(this, WordEqual(WordAnd(word, UintPtrConstant(~mask)), word));
masked_word = word;
} else {
masked_word = WordAnd(word, UintPtrConstant(~mask));
@@ -7996,7 +8002,7 @@ void CodeStubAssembler::TryToName(TNode<Object> key, Label* if_keyisindex,
{
TNode<IntPtrT> index = Signed(
DecodeWordFromWord32<String::ArrayIndexValueBits>(raw_hash_field));
- CSA_ASSERT(this, IntPtrLessThan(index, IntPtrConstant(INT_MAX)));
+ CSA_DCHECK(this, IntPtrLessThan(index, IntPtrConstant(INT_MAX)));
*var_index = index;
Goto(if_keyisindex);
}
@@ -8015,28 +8021,28 @@ void CodeStubAssembler::TryToName(TNode<Object> key, Label* if_keyisindex,
void CodeStubAssembler::StringWriteToFlatOneByte(TNode<String> source,
TNode<RawPtrT> sink,
- TNode<Int32T> from,
- TNode<Int32T> to) {
+ TNode<Int32T> start,
+ TNode<Int32T> length) {
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::string_write_to_flat_one_byte());
CallCFunction(function, base::nullopt,
std::make_pair(MachineType::AnyTagged(), source),
std::make_pair(MachineType::Pointer(), sink),
- std::make_pair(MachineType::Int32(), from),
- std::make_pair(MachineType::Int32(), to));
+ std::make_pair(MachineType::Int32(), start),
+ std::make_pair(MachineType::Int32(), length));
}
void CodeStubAssembler::StringWriteToFlatTwoByte(TNode<String> source,
TNode<RawPtrT> sink,
- TNode<Int32T> from,
- TNode<Int32T> to) {
+ TNode<Int32T> start,
+ TNode<Int32T> length) {
TNode<ExternalReference> function =
ExternalConstant(ExternalReference::string_write_to_flat_two_byte());
CallCFunction(function, base::nullopt,
std::make_pair(MachineType::AnyTagged(), source),
std::make_pair(MachineType::Pointer(), sink),
- std::make_pair(MachineType::Int32(), from),
- std::make_pair(MachineType::Int32(), to));
+ std::make_pair(MachineType::Int32(), start),
+ std::make_pair(MachineType::Int32(), length));
}
TNode<RawPtr<Uint8T>> CodeStubAssembler::ExternalOneByteStringGetChars(
@@ -8349,7 +8355,7 @@ TNode<UintPtrT> CodeStubAssembler::UintPtrMin(TNode<UintPtrT> left,
template <>
TNode<HeapObject> CodeStubAssembler::LoadName<NameDictionary>(
TNode<HeapObject> key) {
- CSA_ASSERT(this, Word32Or(IsTheHole(key), IsName(key)));
+ CSA_DCHECK(this, Word32Or(IsTheHole(key), IsName(key)));
return key;
}
@@ -8370,7 +8376,7 @@ void CodeStubAssembler::NameDictionaryLookup(
DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
DCHECK_IMPLIES(mode == kFindInsertionIndex, if_found == nullptr);
Comment("NameDictionaryLookup");
- CSA_ASSERT(this, IsUniqueName(unique_name));
+ CSA_DCHECK(this, IsUniqueName(unique_name));
TNode<IntPtrT> capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
TNode<IntPtrT> mask = IntPtrSub(capacity, IntPtrConstant(1));
@@ -8378,14 +8384,14 @@ void CodeStubAssembler::NameDictionaryLookup(
// See Dictionary::FirstProbe().
TNode<IntPtrT> count = IntPtrConstant(0);
- TNode<IntPtrT> entry = Signed(WordAnd(hash, mask));
+ TNode<IntPtrT> initial_entry = Signed(WordAnd(hash, mask));
TNode<Oddball> undefined = UndefinedConstant();
// Appease the variable merging algorithm for "Goto(&loop)" below.
*var_name_index = IntPtrConstant(0);
TVARIABLE(IntPtrT, var_count, count);
- TVARIABLE(IntPtrT, var_entry, entry);
+ TVARIABLE(IntPtrT, var_entry, initial_entry);
Label loop(this, {&var_count, &var_entry, var_name_index});
Goto(&loop);
BIND(&loop);
@@ -8458,7 +8464,7 @@ void CodeStubAssembler::NameDictionaryLookup(
void CodeStubAssembler::NumberDictionaryLookup(
TNode<NumberDictionary> dictionary, TNode<IntPtrT> intptr_index,
Label* if_found, TVariable<IntPtrT>* var_entry, Label* if_not_found) {
- CSA_ASSERT(this, IsNumberDictionary(dictionary));
+ CSA_DCHECK(this, IsNumberDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
Comment("NumberDictionaryLookup");
@@ -8470,14 +8476,14 @@ void CodeStubAssembler::NumberDictionaryLookup(
// See Dictionary::FirstProbe().
TNode<IntPtrT> count = IntPtrConstant(0);
- TNode<IntPtrT> entry = Signed(WordAnd(hash, mask));
+ TNode<IntPtrT> initial_entry = Signed(WordAnd(hash, mask));
TNode<Oddball> undefined = UndefinedConstant();
TNode<Oddball> the_hole = TheHoleConstant();
TVARIABLE(IntPtrT, var_count, count);
Label loop(this, {&var_count, var_entry});
- *var_entry = entry;
+ *var_entry = initial_entry;
Goto(&loop);
BIND(&loop);
{
@@ -8564,7 +8570,7 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(
TNode<NameDictionary> dictionary, TNode<Name> name, TNode<Object> value,
TNode<IntPtrT> index, TNode<Smi> enum_index) {
// This should only be used for adding, not updating existing mappings.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(TaggedEqual(LoadFixedArrayElement(dictionary, index),
UndefinedConstant()),
TaggedEqual(LoadFixedArrayElement(dictionary, index),
@@ -8608,7 +8614,7 @@ void CodeStubAssembler::InsertEntry<GlobalDictionary>(
template <class Dictionary>
void CodeStubAssembler::Add(TNode<Dictionary> dictionary, TNode<Name> key,
TNode<Object> value, Label* bailout) {
- CSA_ASSERT(this, Word32BinaryNot(IsEmptyPropertyDictionary(dictionary)));
+ CSA_DCHECK(this, Word32BinaryNot(IsEmptyPropertyDictionary(dictionary)));
TNode<Smi> capacity = GetCapacity<Dictionary>(dictionary);
TNode<Smi> nof = GetNumberOfElements<Dictionary>(dictionary);
TNode<Smi> new_nof = SmiAdd(nof, SmiConstant(1));
@@ -8619,7 +8625,7 @@ void CodeStubAssembler::Add(TNode<Dictionary> dictionary, TNode<Name> key,
GotoIf(SmiBelow(capacity, required_capacity_pseudo_smi), bailout);
// Require rehashing if more than 50% of free elements are deleted elements.
TNode<Smi> deleted = GetNumberOfDeletedElements<Dictionary>(dictionary);
- CSA_ASSERT(this, SmiAbove(capacity, new_nof));
+ CSA_DCHECK(this, SmiAbove(capacity, new_nof));
TNode<Smi> half_of_free_elements = SmiShr(SmiSub(capacity, new_nof), 1);
GotoIf(SmiAbove(deleted, half_of_free_elements), bailout);
@@ -8705,7 +8711,7 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
std::is_base_of<DescriptorArray, Array>::value,
"T must be a descendant of FixedArray or a WeakFixedArray");
Comment("LookupLinear");
- CSA_ASSERT(this, IsUniqueName(unique_name));
+ CSA_DCHECK(this, IsUniqueName(unique_name));
TNode<IntPtrT> first_inclusive = IntPtrConstant(Array::ToKeyIndex(0));
TNode<IntPtrT> factor = IntPtrConstant(Array::kEntrySize);
TNode<IntPtrT> last_exclusive = IntPtrAdd(
@@ -8817,10 +8823,10 @@ void CodeStubAssembler::LookupBinary(TNode<Name> unique_name,
Unsigned(Int32Sub(NumberOfEntries<Array>(array), Int32Constant(1)));
TVARIABLE(Uint32T, var_high, limit);
TNode<Uint32T> hash = LoadNameHashAssumeComputed(unique_name);
- CSA_ASSERT(this, Word32NotEqual(hash, Int32Constant(0)));
+ CSA_DCHECK(this, Word32NotEqual(hash, Int32Constant(0)));
// Assume non-empty array.
- CSA_ASSERT(this, Uint32LessThanOrEqual(var_low.value(), var_high.value()));
+ CSA_DCHECK(this, Uint32LessThanOrEqual(var_low.value(), var_high.value()));
Label binary_loop(this, {&var_high, &var_low});
Goto(&binary_loop);
@@ -8942,7 +8948,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
}
BIND(&if_string);
{
- CSA_ASSERT(this, IsString(next_key));
+ CSA_DCHECK(this, IsString(next_key));
// Process string property when |var_is_symbol_processing_loop| is
// false.
Branch(var_is_symbol_processing_loop.value(), &next_iteration,
@@ -9101,7 +9107,7 @@ TNode<NativeContext> CodeStubAssembler::GetCreationContext(
// Remote objects don't have a creation context.
GotoIf(IsFunctionTemplateInfoMap(function_map), if_bailout);
- CSA_ASSERT(this, IsJSFunctionMap(receiver_map));
+ CSA_DCHECK(this, IsJSFunctionMap(receiver_map));
var_function = CAST(receiver);
Goto(&done);
@@ -9184,8 +9190,8 @@ void CodeStubAssembler::TryLookupPropertyInSimpleObject(
Label* if_found_fast, Label* if_found_dict,
TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
Label* if_not_found, Label* bailout) {
- CSA_ASSERT(this, IsSimpleObjectMap(map));
- CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
+ CSA_DCHECK(this, IsSimpleObjectMap(map));
+ CSA_DCHECK(this, IsUniqueNameNoCachedIndex(unique_name));
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
Label if_isfastmap(this), if_isslowmap(this);
@@ -9249,7 +9255,7 @@ void CodeStubAssembler::TryHasOwnProperty(TNode<HeapObject> object,
Label* if_found, Label* if_not_found,
Label* if_bailout) {
Comment("TryHasOwnProperty");
- CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
+ CSA_DCHECK(this, IsUniqueNameNoCachedIndex(unique_name));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_name_index);
@@ -9347,8 +9353,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
- Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
- &if_in_descriptor);
+ Branch(Word32Equal(location, Int32Constant(static_cast<int32_t>(
+ PropertyLocation::kField))),
+ &if_in_field, &if_in_descriptor);
BIND(&if_in_field);
{
TNode<IntPtrT> field_index =
@@ -9357,7 +9364,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(
DecodeWord32<PropertyDetails::RepresentationField>(details);
// TODO(ishell): support WasmValues.
- CSA_ASSERT(this, Word32NotEqual(representation,
+ CSA_DCHECK(this, Word32NotEqual(representation,
Int32Constant(Representation::kWasmValue)));
field_index =
IntPtrAdd(field_index, LoadMapInobjectPropertiesStartInWords(map));
@@ -9619,7 +9626,7 @@ void CodeStubAssembler::TryGetOwnProperty(
Label* if_not_found, Label* if_bailout, GetOwnPropertyMode mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("TryGetOwnProperty");
- CSA_ASSERT(this, IsUniqueNameNoCachedIndex(unique_name));
+ CSA_DCHECK(this, IsUniqueNameNoCachedIndex(unique_name));
TVARIABLE(HeapObject, var_meta_storage);
TVARIABLE(IntPtrT, var_entry);
@@ -9944,12 +9951,12 @@ void CodeStubAssembler::TryPrototypeChainLookup(
GotoIf(IsNull(proto), if_end);
- TNode<Map> map = LoadMap(proto);
- TNode<Uint16T> instance_type = LoadMapInstanceType(map);
+ TNode<Map> proto_map = LoadMap(proto);
+ TNode<Uint16T> proto_instance_type = LoadMapInstanceType(proto_map);
var_holder = proto;
- var_holder_map = map;
- var_holder_instance_type = instance_type;
+ var_holder_map = proto_map;
+ var_holder_instance_type = proto_instance_type;
Goto(&loop);
}
}
@@ -9974,12 +9981,12 @@ void CodeStubAssembler::TryPrototypeChainLookup(
GotoIf(IsNull(proto), if_end);
- TNode<Map> map = LoadMap(proto);
- TNode<Uint16T> instance_type = LoadMapInstanceType(map);
+ TNode<Map> proto_map = LoadMap(proto);
+ TNode<Uint16T> proto_instance_type = LoadMapInstanceType(proto_map);
var_holder = proto;
- var_holder_map = map;
- var_holder_instance_type = instance_type;
+ var_holder_map = proto_map;
+ var_holder_instance_type = proto_instance_type;
Goto(&loop);
}
}
@@ -10024,7 +10031,7 @@ TNode<Oddball> CodeStubAssembler::HasInPrototypeChain(TNode<Context> context,
GotoIf(TaggedEqual(object_prototype, prototype), &return_true);
// Continue with the prototype.
- CSA_ASSERT(this, TaggedIsNotSmi(object_prototype));
+ CSA_DCHECK(this, TaggedIsNotSmi(object_prototype));
var_object_map = LoadMap(object_prototype);
Goto(&loop);
}
@@ -10274,7 +10281,7 @@ void CodeStubAssembler::UpdateFeedback(TNode<Smi> feedback,
MaybeUpdateFeedback(feedback, maybe_feedback_vector, slot_id);
break;
case UpdateFeedbackMode::kGuaranteedFeedback:
- CSA_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
+ CSA_DCHECK(this, IsFeedbackVector(maybe_feedback_vector));
UpdateFeedback(feedback, CAST(maybe_feedback_vector), slot_id);
break;
}
@@ -10518,7 +10525,7 @@ void CodeStubAssembler::StoreElementTypedArrayWord32(TNode<RawPtrT> elements,
"Only UintPtrT or IntPtrT indices is allowed");
DCHECK(IsTypedArrayElementsKind(kind));
if (kind == UINT8_CLAMPED_ELEMENTS) {
- CSA_ASSERT(this, Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
+ CSA_DCHECK(this, Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
}
TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, 0);
// TODO(cbruni): Add OOB check once typed.
@@ -11017,13 +11024,13 @@ void CodeStubAssembler::EmitElementStore(
TNode<JSObject> object, TNode<Object> key, TNode<Object> value,
ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout,
TNode<Context> context, TVariable<Object>* maybe_converted_value) {
- CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
+ CSA_DCHECK(this, Word32BinaryNot(IsJSProxy(object)));
TNode<FixedArrayBase> elements = LoadElements(object);
if (!(IsSmiOrObjectElementsKind(elements_kind) ||
IsSealedElementsKind(elements_kind) ||
IsNonextensibleElementsKind(elements_kind))) {
- CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (!IsCOWHandlingStoreMode(store_mode)) {
GotoIf(IsFixedCOWArrayMap(LoadMap(elements)), bailout);
}
@@ -11124,13 +11131,13 @@ void CodeStubAssembler::EmitElementStore(
if (!(IsSmiOrObjectElementsKind(elements_kind) ||
IsSealedElementsKind(elements_kind) ||
IsNonextensibleElementsKind(elements_kind))) {
- CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (IsCOWHandlingStoreMode(store_mode)) {
elements = CopyElementsOnWrite(object, elements, elements_kind,
Signed(length), bailout);
}
- CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
+ CSA_DCHECK(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
if (float_value) {
StoreElement(elements, elements_kind, intptr_key, float_value.value());
} else {
@@ -11179,7 +11186,7 @@ TNode<FixedArrayBase> CodeStubAssembler::CheckForCapacityGrow(
Runtime::kGrowArrayElements, NoContextConstant(), object, tagged_key);
GotoIf(TaggedIsSmi(maybe_elements), bailout);
TNode<FixedArrayBase> new_elements = CAST(maybe_elements);
- CSA_ASSERT(this, IsFixedArrayWithKind(new_elements, kind));
+ CSA_DCHECK(this, IsFixedArrayWithKind(new_elements, kind));
checked_elements = new_elements;
Goto(&fits_capacity);
}
@@ -11246,12 +11253,12 @@ void CodeStubAssembler::TransitionElementsKind(TNode<JSObject> object,
TNode<IntPtrT> array_length = Select<IntPtrT>(
IsJSArray(object),
[=]() {
- CSA_ASSERT(this, IsFastElementsKind(LoadElementsKind(object)));
+ CSA_DCHECK(this, IsFastElementsKind(LoadElementsKind(object)));
return SmiUntag(LoadFastJSArrayLength(CAST(object)));
},
[=]() { return elements_length; });
- CSA_ASSERT(this, WordNotEqual(elements_length, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(elements_length, IntPtrConstant(0)));
GrowElementsCapacity(object, elements, from_kind, to_kind, array_length,
elements_length, bailout);
@@ -11343,7 +11350,7 @@ TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot) {
TNode<IntPtrT> size = IntPtrConstant(AllocationSite::kSizeWithWeakNext);
- TNode<HeapObject> site = Allocate(size, CodeStubAssembler::kPretenured);
+ TNode<HeapObject> site = Allocate(size, AllocationFlag::kPretenured);
StoreMapNoWriteBarrier(site, RootIndex::kAllocationSiteWithWeakNextMap);
// Should match AllocationSite::Initialize.
TNode<WordT> field = UpdateWord<AllocationSite::ElementsKindBits>(
@@ -11420,7 +11427,7 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
TNode<Int32T> elements_kind =
Signed(DecodeWord32<AllocationSite::ElementsKindBits>(
SmiToInt32(transition_info)));
- CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+ CSA_DCHECK(this, IsFastElementsKind(elements_kind));
return elements_kind;
}
@@ -11490,7 +11497,7 @@ void CodeStubAssembler::BuildFastArrayForEach(
TNode<TIndex> last_element_exclusive, const FastArrayForEachBody& body,
ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
- CSA_SLOW_ASSERT(this, Word32Or(IsFixedArrayWithKind(array, kind),
+ CSA_SLOW_DCHECK(this, Word32Or(IsFixedArrayWithKind(array, kind),
IsPropertyArray(array)));
intptr_t first_val;
@@ -11546,7 +11553,7 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
TNode<IntPtrT> start_offset,
TNode<IntPtrT> end_offset,
RootIndex root_index) {
- CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
+ CSA_SLOW_DCHECK(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
TNode<AnyTaggedT> root_value;
@@ -11697,7 +11704,7 @@ TNode<Context> CodeStubAssembler::GotoIfHasContextExtensionUpToDepth(
Label no_extension(this);
// Loop until the depth is 0.
- CSA_ASSERT(this, Word32NotEqual(cur_depth.value(), Int32Constant(0)));
+ CSA_DCHECK(this, Word32NotEqual(cur_depth.value(), Int32Constant(0)));
Goto(&context_search);
BIND(&context_search);
{
@@ -12188,7 +12195,7 @@ void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
BIND(&if_string);
{
- CSA_ASSERT(this, IsString(value_heapobject));
+ CSA_DCHECK(this, IsString(value_heapobject));
CombineFeedback(var_type_feedback,
CollectFeedbackForString(instance_type));
Goto(if_equal);
@@ -12196,28 +12203,28 @@ void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
BIND(&if_symbol);
{
- CSA_ASSERT(this, IsSymbol(value_heapobject));
+ CSA_DCHECK(this, IsSymbol(value_heapobject));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kSymbol);
Goto(if_equal);
}
BIND(&if_receiver);
{
- CSA_ASSERT(this, IsJSReceiver(value_heapobject));
+ CSA_DCHECK(this, IsJSReceiver(value_heapobject));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kReceiver);
Goto(if_equal);
}
BIND(&if_bigint);
{
- CSA_ASSERT(this, IsBigInt(value_heapobject));
+ CSA_DCHECK(this, IsBigInt(value_heapobject));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
Goto(if_equal);
}
BIND(&if_oddball);
{
- CSA_ASSERT(this, IsOddball(value_heapobject));
+ CSA_DCHECK(this, IsOddball(value_heapobject));
Label if_boolean(this), if_not_boolean(this);
Branch(IsBooleanMap(value_map), &if_boolean, &if_not_boolean);
@@ -12229,7 +12236,7 @@ void CodeStubAssembler::GenerateEqual_Same(TNode<Object> value, Label* if_equal,
BIND(&if_not_boolean);
{
- CSA_ASSERT(this, IsNullOrUndefined(value_heapobject));
+ CSA_DCHECK(this, IsNullOrUndefined(value_heapobject));
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kReceiverOrNullOrUndefined);
Goto(if_equal);
@@ -12318,8 +12325,8 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_right_not_smi);
{
TNode<Map> right_map = LoadMap(CAST(right));
- Label if_right_heapnumber(this), if_right_boolean(this),
- if_right_oddball(this), if_right_bigint(this, Label::kDeferred),
+ Label if_right_heapnumber(this), if_right_oddball(this),
+ if_right_bigint(this, Label::kDeferred),
if_right_receiver(this, Label::kDeferred);
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
@@ -12607,7 +12614,7 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_left_receiver);
{
- CSA_ASSERT(this, IsJSReceiverInstanceType(left_type));
+ CSA_DCHECK(this, IsJSReceiverInstanceType(left_type));
Label if_right_receiver(this), if_right_not_receiver(this);
Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
&if_right_not_receiver);
@@ -12632,7 +12639,7 @@ TNode<Oddball> CodeStubAssembler::Equal(TNode<Object> left, TNode<Object> right,
BIND(&if_right_undetectable);
{
// When we get here, {right} must be either Null or Undefined.
- CSA_ASSERT(this, IsNullOrUndefined(right));
+ CSA_DCHECK(this, IsNullOrUndefined(right));
if (var_type_feedback != nullptr) {
*var_type_feedback = SmiConstant(
CompareOperationFeedback::kReceiverOrNullOrUndefined);
@@ -13273,7 +13280,7 @@ TNode<Oddball> CodeStubAssembler::HasProperty(TNode<Context> context,
}
BIND(&end);
- CSA_ASSERT(this, IsBoolean(result.value()));
+ CSA_DCHECK(this, IsBoolean(result.value()));
return result.value();
}
@@ -13294,7 +13301,7 @@ void CodeStubAssembler::ForInPrepare(TNode<HeapObject> enumerator,
// Load the enumeration length and cache from the {enumerator}.
TNode<Map> map_enumerator = CAST(enumerator);
TNode<WordT> enum_length = LoadMapEnumLength(map_enumerator);
- CSA_ASSERT(this, WordNotEqual(enum_length,
+ CSA_DCHECK(this, WordNotEqual(enum_length,
IntPtrConstant(kInvalidEnumCacheSentinel)));
TNode<DescriptorArray> descriptors = LoadMapDescriptors(map_enumerator);
TNode<EnumCache> enum_cache = LoadObjectField<EnumCache>(
@@ -13373,7 +13380,7 @@ TNode<String> CodeStubAssembler::Typeof(TNode<Object> value) {
GotoIf(IsBigIntInstanceType(instance_type), &return_bigint);
- CSA_ASSERT(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE));
+ CSA_DCHECK(this, InstanceTypeEqual(instance_type, SYMBOL_TYPE));
result_var = HeapConstant(isolate()->factory()->symbol_string());
Goto(&return_result);
@@ -13721,7 +13728,7 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
TNode<Context> context, TNode<Object> value, TNode<Oddball> done) {
- CSA_ASSERT(this, IsBoolean(done));
+ CSA_DCHECK(this, IsBoolean(done));
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
@@ -13856,7 +13863,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
BIND(&is_gsab);
{
// Non-length-tracking GSAB-backed TypedArrays shouldn't end up here.
- CSA_ASSERT(this, IsLengthTrackingTypedArray(array));
+ CSA_DCHECK(this, IsLengthTrackingTypedArray(array));
// Read the byte length from the BackingStore.
const TNode<ExternalReference> length_function = ExternalConstant(
ExternalReference::length_tracking_gsab_backed_typed_array_length());
@@ -13916,7 +13923,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
return result.value();
}
-void CodeStubAssembler::IsTypedArrayDetachedOrOutOfBounds(
+void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
TNode<JSTypedArray> array, Label* detached_or_oob,
Label* not_detached_nor_oob) {
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
@@ -14083,7 +14090,7 @@ TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
}
TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
- CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(
+ CSA_DCHECK(assembler_, assembler_->UintPtrOrSmiLessThan(
index, GetLengthWithoutReceiver()));
return assembler_->LoadFullTagged(AtIndexPtr(index));
}
@@ -14201,7 +14208,7 @@ TNode<BoolT> CodeStubAssembler::IsFastSmiElementsKind(
TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKind(
TNode<Int32T> elements_kind) {
- CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+ CSA_DCHECK(this, IsFastElementsKind(elements_kind));
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1));
STATIC_ASSERT(HOLEY_ELEMENTS == (PACKED_ELEMENTS | 1));
@@ -14211,7 +14218,7 @@ TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKind(
TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKindForRead(
TNode<Int32T> elements_kind) {
- CSA_ASSERT(this, Uint32LessThanOrEqual(
+ CSA_DCHECK(this, Uint32LessThanOrEqual(
elements_kind,
Int32Constant(LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)));
@@ -14230,6 +14237,11 @@ TNode<BoolT> CodeStubAssembler::IsElementsKindGreaterThan(
return Int32GreaterThan(target_kind, Int32Constant(reference_kind));
}
+TNode<BoolT> CodeStubAssembler::IsElementsKindGreaterThanOrEqual(
+ TNode<Int32T> target_kind, ElementsKind reference_kind) {
+ return Int32GreaterThanOrEqual(target_kind, Int32Constant(reference_kind));
+}
+
TNode<BoolT> CodeStubAssembler::IsElementsKindLessThanOrEqual(
TNode<Int32T> target_kind, ElementsKind reference_kind) {
return Int32LessThanOrEqual(target_kind, Int32Constant(reference_kind));
@@ -14295,7 +14307,7 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
- CSA_ASSERT(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
+ CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
TNode<IntPtrT> offset =
ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
@@ -14402,7 +14414,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsInterpreterData: Interpret bytecode
BIND(&check_is_interpreter_data);
// This is the default branch, so assert that we have the expected data type.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE)));
{
TNode<CodeT> trampoline =
@@ -14437,8 +14449,8 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
// TODO(ishell): All the callers of this function pass map loaded from
// Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
// map parameter.
- CSA_ASSERT(this, Word32BinaryNot(IsConstructorMap(map)));
- CSA_ASSERT(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
+ CSA_DCHECK(this, Word32BinaryNot(IsConstructorMap(map)));
+ CSA_DCHECK(this, Word32BinaryNot(IsFunctionWithPrototypeSlotMap(map)));
const TNode<HeapObject> fun = Allocate(JSFunction::kSizeWithoutPrototype);
STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kTaggedSize);
StoreMapNoWriteBarrier(fun, map);
@@ -14519,7 +14531,7 @@ TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<JSReceiver> receiver,
TNode<HeapObject> properties = LoadSlowProperties(receiver);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- CSA_ASSERT(this, Word32Or(IsSwissNameDictionary(properties),
+ CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(properties),
IsGlobalDictionary(properties)));
length = Select<Smi>(
@@ -14534,7 +14546,7 @@ TNode<Map> CodeStubAssembler::CheckEnumCache(TNode<JSReceiver> receiver,
});
} else {
- CSA_ASSERT(this, Word32Or(IsNameDictionary(properties),
+ CSA_DCHECK(this, Word32Or(IsNameDictionary(properties),
IsGlobalDictionary(properties)));
STATIC_ASSERT(static_cast<int>(NameDictionary::kNumberOfElementsIndex) ==
static_cast<int>(GlobalDictionary::kNumberOfElementsIndex));
@@ -14661,7 +14673,7 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
Label done(this), next(this), runtime(this, Label::kDeferred);
TNode<Smi> limit = SmiConstant(JSArray::kInitialMaxFastElementArray);
- CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
+ CSA_DCHECK_BRANCH(this, [=](Label* ok, Label* not_ok) {
BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, length,
SmiConstant(0), ok, not_ok);
});
@@ -14720,7 +14732,7 @@ void CodeStubAssembler::SetPropertyLength(TNode<Context> context,
TNode<Smi> length_smi = CAST(length);
TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
- CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
+ CSA_DCHECK(this, TaggedIsPositiveSmi(old_length));
// 2) If the created array's length matches the required length, then
// there's nothing else to do. Otherwise use the runtime to set the
@@ -14818,13 +14830,13 @@ void PrototypeCheckAssembler::CheckAndBranch(TNode<HeapObject> prototype,
for (int i = 0; i < properties_.length(); i++) {
// Assert the descriptor index is in-bounds.
int descriptor = properties_[i].descriptor_index;
- CSA_ASSERT(this, Int32LessThan(Int32Constant(descriptor),
+ CSA_DCHECK(this, Int32LessThan(Int32Constant(descriptor),
LoadNumberOfDescriptors(descriptors)));
// Assert that the name is correct. This essentially checks that
// the descriptor index corresponds to the insertion order in
// the bootstrapper.
- CSA_ASSERT(
+ CSA_DCHECK(
this,
TaggedEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
CodeAssembler::LoadRoot(properties_[i].name_root_index)));
@@ -14938,7 +14950,7 @@ class MetaTableAccessor {
int bits = mt.MemSize() * 8;
TNode<UintPtrT> max_value = csa.UintPtrConstant((1ULL << bits) - 1);
- CSA_ASSERT(&csa, csa.UintPtrLessThanOrEqual(csa.ChangeUint32ToWord(data),
+ CSA_DCHECK(&csa, csa.UintPtrLessThanOrEqual(csa.ChangeUint32ToWord(data),
max_value));
#endif
@@ -14976,7 +14988,7 @@ class MetaTableAccessor {
csa.SmiToIntPtr(csa.LoadFixedArrayBaseLength(meta_table));
TNode<IntPtrT> max_allowed_offset = csa.IntPtrAdd(
byte_array_data_bytes, csa.IntPtrConstant(offset_to_data_minus_tag));
- CSA_ASSERT(&csa, csa.UintPtrLessThan(overall_offset, max_allowed_offset));
+ CSA_DCHECK(&csa, csa.UintPtrLessThan(overall_offset, max_allowed_offset));
#endif
return overall_offset;
@@ -15146,11 +15158,11 @@ TNode<SwissNameDictionary>
CodeStubAssembler::AllocateSwissNameDictionaryWithCapacity(
TNode<IntPtrT> capacity) {
Comment("[ AllocateSwissNameDictionaryWithCapacity");
- CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
- CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
+ CSA_DCHECK(this, WordIsPowerOfTwo(capacity));
+ CSA_DCHECK(this, UintPtrGreaterThanOrEqual(
capacity,
IntPtrConstant(SwissNameDictionary::kInitialCapacity)));
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
UintPtrLessThanOrEqual(
capacity, IntPtrConstant(SwissNameDictionary::MaxCapacity())));
@@ -15190,7 +15202,7 @@ CodeStubAssembler::AllocateSwissNameDictionaryWithCapacity(
TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
- Allocate(total_size, kAllowLargeObjectAllocation));
+ Allocate(total_size, AllocationFlag::kAllowLargeObjectAllocation));
StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
@@ -15290,7 +15302,7 @@ TNode<SwissNameDictionary> CodeStubAssembler::CopySwissNameDictionary(
TNode<IntPtrT> total_size = SwissNameDictionarySizeFor(capacity);
TNode<SwissNameDictionary> table = UncheckedCast<SwissNameDictionary>(
- Allocate(total_size, kAllowLargeObjectAllocation));
+ Allocate(total_size, AllocationFlag::kAllowLargeObjectAllocation));
StoreMapNoWriteBarrier(table, RootIndex::kSwissNameDictionaryMap);
@@ -15440,7 +15452,7 @@ TNode<IntPtrT>
CodeStubAssembler::SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(
TNode<SwissNameDictionary> dict, TNode<IntPtrT> capacity,
TNode<IntPtrT> index) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordEqual(capacity, ChangeUint32ToWord(
LoadSwissNameDictionaryCapacity(dict))));
@@ -15457,7 +15469,7 @@ CodeStubAssembler::SwissNameDictionaryOffsetIntoPropertyDetailsTableMT(
TNode<IntPtrT> property_details_table_start =
IntPtrAdd(data_table_start, data_and_ctrl_table_size);
- CSA_ASSERT(
+ CSA_DCHECK(
this,
WordEqual(FieldSliceSwissNameDictionaryPropertyDetailsTable(dict).offset,
// Our calculation subtracted the tag, Torque's offset didn't.
@@ -15560,15 +15572,15 @@ TNode<Uint64T> CodeStubAssembler::LoadSwissNameDictionaryCtrlTableGroup(
void CodeStubAssembler::SwissNameDictionarySetCtrl(
TNode<SwissNameDictionary> table, TNode<IntPtrT> capacity,
TNode<IntPtrT> entry, TNode<Uint8T> ctrl) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordEqual(capacity, ChangeUint32ToWord(
LoadSwissNameDictionaryCapacity(table))));
- CSA_ASSERT(this, UintPtrLessThan(entry, capacity));
+ CSA_DCHECK(this, UintPtrLessThan(entry, capacity));
TNode<IntPtrT> one = IntPtrConstant(1);
TNode<IntPtrT> offset = SwissNameDictionaryCtrlTableStartOffsetMT(capacity);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordEqual(FieldSliceSwissNameDictionaryCtrlTable(table).offset,
IntPtrAdd(offset, one)));
@@ -15590,11 +15602,11 @@ void CodeStubAssembler::SwissNameDictionarySetCtrl(
TNode<IntPtrT> offset_copy_entry = IntPtrAdd(offset, copy_entry);
// |entry| < |kGroupWidth| implies |copy_entry| == |capacity| + |entry|
- CSA_ASSERT(this, Word32Or(UintPtrGreaterThanOrEqual(entry, group_width),
+ CSA_DCHECK(this, Word32Or(UintPtrGreaterThanOrEqual(entry, group_width),
WordEqual(copy_entry, IntPtrAdd(capacity, entry))));
// |entry| >= |kGroupWidth| implies |copy_entry| == |entry|
- CSA_ASSERT(this, Word32Or(UintPtrLessThan(entry, group_width),
+ CSA_DCHECK(this, Word32Or(UintPtrLessThan(entry, group_width),
WordEqual(copy_entry, entry)));
// TODO(v8:11330): consider using StoreObjectFieldNoWriteBarrier here.
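The call sites rewritten above to AllocationFlag::kPretenured and AllocationFlag::kAllowLargeObjectAllocation follow from the enum-class conversion visible in the header diff below. A minimal standalone C++ sketch of that pattern, using hypothetical names rather than the real CodeStubAssembler API:

#include <cstdint>

// Hypothetical stand-in for the scoped allocation-flag enum.
enum class AllocationFlag : uint8_t {
  kNone = 0,
  kDoubleAlignment = 1,
  kPretenured = 1 << 1,
  kAllowLargeObjectAllocation = 1 << 2,
};

// A scoped enum no longer injects its enumerators into the surrounding
// scope and no longer converts implicitly to int, so combining flags needs
// an explicit operator (the real code presumably relies on V8's own flags
// helpers; this operator exists only to keep the sketch self-contained).
constexpr AllocationFlag operator|(AllocationFlag a, AllocationFlag b) {
  return static_cast<AllocationFlag>(static_cast<uint8_t>(a) |
                                     static_cast<uint8_t>(b));
}

void Allocate(int size, AllocationFlag flags = AllocationFlag::kNone) {
  (void)size;
  (void)flags;  // A real allocator would honor the flags.
}

void Example() {
  // Before the conversion a bare kPretenured compiled; afterwards every
  // call site must qualify the enumerator, which is the mechanical change
  // seen throughout the hunks above.
  Allocate(64, AllocationFlag::kPretenured);
}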
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index f869ac687f..1cb0b4cf6e 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -233,36 +233,36 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#endif
#ifdef DEBUG
-// CSA_ASSERT_ARGS generates an
+// CSA_DCHECK_ARGS generates an
// std::initializer_list<CodeStubAssembler::ExtraNode> from __VA_ARGS__. It
// currently supports between 0 and 2 arguments.
// clang-format off
-#define CSA_ASSERT_0_ARGS(...) {}
-#define CSA_ASSERT_1_ARG(a, ...) {{a, #a}}
-#define CSA_ASSERT_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
+#define CSA_DCHECK_0_ARGS(...) {}
+#define CSA_DCHECK_1_ARG(a, ...) {{a, #a}}
+#define CSA_DCHECK_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
// clang-format on
-#define SWITCH_CSA_ASSERT_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
-#define CSA_ASSERT_ARGS(...) \
- CALL(SWITCH_CSA_ASSERT_ARGS, (, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \
- CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS))
+#define SWITCH_CSA_DCHECK_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
+#define CSA_DCHECK_ARGS(...) \
+ CALL(SWITCH_CSA_DCHECK_ARGS, (, ##__VA_ARGS__, CSA_DCHECK_2_ARGS, \
+ CSA_DCHECK_1_ARG, CSA_DCHECK_0_ARGS))
// Workaround for MSVC to skip comma in empty __VA_ARGS__.
#define CALL(x, y) x y
-// CSA_ASSERT(csa, <condition>, <extra values to print...>)
+// CSA_DCHECK(csa, <condition>, <extra values to print...>)
-#define CSA_ASSERT(csa, condition_node, ...) \
- (csa)->Assert(condition_node, #condition_node, __FILE__, __LINE__, \
- CSA_ASSERT_ARGS(__VA_ARGS__))
+#define CSA_DCHECK(csa, condition_node, ...) \
+ (csa)->Dcheck(condition_node, #condition_node, __FILE__, __LINE__, \
+ CSA_DCHECK_ARGS(__VA_ARGS__))
-// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
+// CSA_DCHECK_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
// <extra values to print...>)
-#define CSA_ASSERT_BRANCH(csa, gen, ...) \
- (csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
+#define CSA_DCHECK_BRANCH(csa, gen, ...) \
+ (csa)->Dcheck(gen, #gen, __FILE__, __LINE__, CSA_DCHECK_ARGS(__VA_ARGS__))
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
+#define CSA_DCHECK_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Dcheck( \
[&]() -> TNode<BoolT> { \
const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
Descriptor::kJSActualArgumentsCount); \
@@ -274,8 +274,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
Descriptor::kJSActualArgumentsCount)), \
"argc"}})
-#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
- CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
+#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) \
+ CSA_DCHECK_JS_ARGC_OP(csa, Word32Equal, ==, expected)
#define CSA_DEBUG_INFO(name) \
{ #name, __FILE__, __LINE__ }
@@ -285,9 +285,9 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \
name(CSA_DEBUG_INFO(name), __VA_ARGS__)
#else // DEBUG
-#define CSA_ASSERT(csa, ...) ((void)0)
-#define CSA_ASSERT_BRANCH(csa, ...) ((void)0)
-#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
+#define CSA_DCHECK(csa, ...) ((void)0)
+#define CSA_DCHECK_BRANCH(csa, ...) ((void)0)
+#define CSA_DCHECK_JS_ARGC_EQ(csa, expected) ((void)0)
#define BIND(label) Bind(label)
#define TYPED_VARIABLE_DEF(type, name, ...) TVariable<type> name(__VA_ARGS__)
#define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__)
@@ -298,12 +298,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
EXPAND(TYPED_VARIABLE_CONSTRUCTOR(__VA_ARGS__, this))
#ifdef ENABLE_SLOW_DCHECKS
-#define CSA_SLOW_ASSERT(csa, ...) \
+#define CSA_SLOW_DCHECK(csa, ...) \
if (FLAG_enable_slow_asserts) { \
- CSA_ASSERT(csa, __VA_ARGS__); \
+ CSA_DCHECK(csa, __VA_ARGS__); \
}
#else
-#define CSA_SLOW_ASSERT(csa, ...) ((void)0)
+#define CSA_SLOW_DCHECK(csa, ...) ((void)0)
#endif
// Provides JavaScript-specific "macro-assembler" functionality on top of the
@@ -322,7 +322,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
explicit CodeStubAssembler(compiler::CodeAssemblerState* state);
- enum AllocationFlag : uint8_t {
+ enum class AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
kPretenured = 1 << 1,
@@ -753,13 +753,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Operation bitwise_op);
// Allocate an object of the given size.
- TNode<HeapObject> AllocateInNewSpace(TNode<IntPtrT> size,
- AllocationFlags flags = kNone);
- TNode<HeapObject> AllocateInNewSpace(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> AllocateInNewSpace(
+ TNode<IntPtrT> size, AllocationFlags flags = AllocationFlag::kNone);
+ TNode<HeapObject> AllocateInNewSpace(
+ int size, AllocationFlags flags = AllocationFlag::kNone);
TNode<HeapObject> Allocate(TNode<IntPtrT> size,
- AllocationFlags flags = kNone);
+ AllocationFlags flags = AllocationFlag::kNone);
- TNode<HeapObject> Allocate(int size, AllocationFlags flags = kNone);
+ TNode<HeapObject> Allocate(int size,
+ AllocationFlags flags = AllocationFlag::kNone);
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
@@ -768,13 +770,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
using NodeGenerator = std::function<TNode<T>()>;
using ExtraNode = std::pair<TNode<Object>, const char*>;
- void Assert(const BranchGenerator& branch, const char* message,
+ void Dcheck(const BranchGenerator& branch, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Assert(const NodeGenerator<BoolT>& condition_body, const char* message,
+ void Dcheck(const NodeGenerator<BoolT>& condition_body, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
- void Assert(TNode<Word32T> condition_node, const char* message,
+ void Dcheck(TNode<Word32T> condition_node, const char* message,
const char* file, int line,
std::initializer_list<ExtraNode> extra_nodes = {});
void Check(const BranchGenerator& branch, const char* message,
@@ -1097,7 +1099,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<ExternalString> object) {
// This is only valid for ExternalStrings where the resource data
// pointer is cached (i.e. no uncached external strings).
- CSA_ASSERT(this, Word32NotEqual(
+ CSA_DCHECK(this, Word32NotEqual(
Word32And(LoadInstanceType(object),
Int32Constant(kUncachedExternalStringMask)),
Int32Constant(kUncachedExternalStringTag)));
@@ -1236,7 +1238,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
- CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
+ CSA_DCHECK(this, TaggedIsNotSmi(reference.object));
return CAST(
LoadFromObject(MachineTypeOf<T>::value, reference.object, offset));
}
@@ -1270,7 +1272,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
- CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
+ CSA_DCHECK(this, TaggedIsNotSmi(reference.object));
StoreToObject(rep, reference.object, offset, value, write_barrier);
}
template <class T, typename std::enable_if<
@@ -1809,17 +1811,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocationFlags flags);
// Allocate a ByteArray with the given length.
- TNode<ByteArray> AllocateByteArray(TNode<UintPtrT> length,
- AllocationFlags flags = kNone);
+ TNode<ByteArray> AllocateByteArray(
+ TNode<UintPtrT> length, AllocationFlags flags = AllocationFlag::kNone);
// Allocate a SeqOneByteString with the given length.
- TNode<String> AllocateSeqOneByteString(uint32_t length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqOneByteString(
+ uint32_t length, AllocationFlags flags = AllocationFlag::kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqOneByteString;
// Allocate a SeqTwoByteString with the given length.
- TNode<String> AllocateSeqTwoByteString(uint32_t length,
- AllocationFlags flags = kNone);
+ TNode<String> AllocateSeqTwoByteString(
+ uint32_t length, AllocationFlags flags = AllocationFlag::kNone);
using TorqueGeneratedExportedMacrosAssembler::AllocateSeqTwoByteString;
// Allocate a SlicedOneByteString with the given length, parent and offset.
@@ -1836,9 +1838,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
- TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone);
+ TNode<IntPtrT> at_least_space_for,
+ AllocationFlags = AllocationFlag::kNone);
TNode<NameDictionary> AllocateNameDictionaryWithCapacity(
- TNode<IntPtrT> capacity, AllocationFlags = kNone);
+ TNode<IntPtrT> capacity, AllocationFlags = AllocationFlag::kNone);
TNode<NameDictionary> CopyNameDictionary(TNode<NameDictionary> dictionary,
Label* large_object_fallback);
@@ -1856,7 +1859,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Map> map,
base::Optional<TNode<HeapObject>> properties = base::nullopt,
base::Optional<TNode<FixedArray>> elements = base::nullopt,
- AllocationFlags flags = kNone,
+ AllocationFlags flags = AllocationFlag::kNone,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
@@ -1881,30 +1884,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
base::Optional<TNode<AllocationSite>> allocation_site,
- TNode<IntPtrT> capacity, AllocationFlags allocation_flags = kNone,
+ TNode<IntPtrT> capacity,
+ AllocationFlags allocation_flags = AllocationFlag::kNone,
int array_header_size = JSArray::kHeaderSize);
// Allocate a JSArray and fill elements with the hole.
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
- AllocationFlags allocation_flags = kNone);
+ AllocationFlags allocation_flags = AllocationFlag::kNone);
TNode<JSArray> AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
- AllocationFlags allocation_flags = kNone) {
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
allocation_site, allocation_flags);
}
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<Smi> capacity, TNode<Smi> length,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
+ TNode<Smi> length,
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
base::nullopt, allocation_flags);
}
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
+ TNode<Smi> length,
+ AllocationFlags allocation_flags = AllocationFlag::kNone) {
return AllocateJSArray(kind, array_map, capacity, length, base::nullopt,
allocation_flags);
}
@@ -1937,7 +1943,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <typename TIndex>
TNode<FixedArrayBase> AllocateFixedArray(
- ElementsKind kind, TNode<TIndex> capacity, AllocationFlags flags = kNone,
+ ElementsKind kind, TNode<TIndex> capacity,
+ AllocationFlags flags = AllocationFlag::kNone,
base::Optional<TNode<Map>> fixed_array_map = base::nullopt);
TNode<NativeContext> GetCreationContext(TNode<JSReceiver> receiver,
@@ -2622,6 +2629,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsHoleyFastElementsKindForRead(TNode<Int32T> elements_kind);
TNode<BoolT> IsElementsKindGreaterThan(TNode<Int32T> target_kind,
ElementsKind reference_kind);
+ TNode<BoolT> IsElementsKindGreaterThanOrEqual(TNode<Int32T> target_kind,
+ ElementsKind reference_kind);
TNode<BoolT> IsElementsKindLessThanOrEqual(TNode<Int32T> target_kind,
ElementsKind reference_kind);
// Check if lower_reference_kind <= target_kind <= higher_reference_kind.
@@ -2881,9 +2890,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Call non-allocating runtime String::WriteToFlat using fast C-calls.
void StringWriteToFlatOneByte(TNode<String> source, TNode<RawPtrT> sink,
- TNode<Int32T> from, TNode<Int32T> to);
+ TNode<Int32T> start, TNode<Int32T> length);
void StringWriteToFlatTwoByte(TNode<String> source, TNode<RawPtrT> sink,
- TNode<Int32T> from, TNode<Int32T> to);
+ TNode<Int32T> start, TNode<Int32T> length);
// Calls External{One,Two}ByteString::GetChars with a fast C-call.
TNode<RawPtr<Uint8T>> ExternalOneByteStringGetChars(
@@ -3563,9 +3572,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
TNode<Context> context, TNode<JSTypedArray> array,
TNode<JSArrayBuffer> buffer);
- void IsTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
- Label* detached_or_oob,
- Label* not_detached_nor_oob);
+ void IsJSTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
+ Label* detached_or_oob,
+ Label* not_detached_nor_oob);
TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
TNode<Int32T> elementsKind);
@@ -4068,7 +4077,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Number>* var_result,
Label* if_bailout);
- void AssertHasValidMap(TNode<HeapObject> object);
+ void DcheckHasValidMap(TNode<HeapObject> object);
template <typename TValue>
void EmitElementStoreTypedArray(TNode<JSTypedArray> typed_array,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 0c04e84a68..8f42fa7f50 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -7,9 +7,11 @@
#include "src/api/api.h"
#include "src/base/ieee754.h"
#include "src/codegen/cpu-features.h"
+#include "src/common/globals.h"
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-utils.h"
#include "src/execution/isolate.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/simulator-base.h"
@@ -188,8 +190,8 @@ ExternalReference ExternalReference::Create(const Runtime::Function* f) {
}
// static
-ExternalReference ExternalReference::Create(Address address) {
- return ExternalReference(Redirect(address));
+ExternalReference ExternalReference::Create(Address address, Type type) {
+ return ExternalReference(Redirect(address, type));
}
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
@@ -759,6 +761,11 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_top_address(
isolate->regexp_stack()->memory_top_address_address());
}
+ExternalReference ExternalReference::address_of_regexp_stack_stack_pointer(
+ Isolate* isolate) {
+ return ExternalReference(isolate->regexp_stack()->stack_pointer_address());
+}
+
ExternalReference ExternalReference::javascript_execution_assert(
Isolate* isolate) {
return ExternalReference(isolate->javascript_execution_assert_address());
@@ -882,35 +889,37 @@ ExternalReference ExternalReference::search_string_raw_two_two() {
namespace {
-void StringWriteToFlatOneByte(Address source, uint8_t* sink, int32_t from,
- int32_t to) {
- return String::WriteToFlat<uint8_t>(String::cast(Object(source)), sink, from,
- to);
+void StringWriteToFlatOneByte(Address source, uint8_t* sink, int32_t start,
+ int32_t length) {
+ return String::WriteToFlat<uint8_t>(String::cast(Object(source)), sink, start,
+ length);
}
-void StringWriteToFlatTwoByte(Address source, uint16_t* sink, int32_t from,
- int32_t to) {
- return String::WriteToFlat<uint16_t>(String::cast(Object(source)), sink, from,
- to);
+void StringWriteToFlatTwoByte(Address source, uint16_t* sink, int32_t start,
+ int32_t length) {
+ return String::WriteToFlat<uint16_t>(String::cast(Object(source)), sink,
+ start, length);
}
const uint8_t* ExternalOneByteStringGetChars(Address string) {
+ PtrComprCageBase cage_base = GetPtrComprCageBaseFromOnHeapAddress(string);
// The following CHECK is a workaround to prevent a CFI bug where
// ExternalOneByteStringGetChars() and ExternalTwoByteStringGetChars() are
// merged by the linker, resulting in one of the input type's vtable address
// failing the address range check.
// TODO(chromium:1160961): Consider removing the CHECK when CFI is fixed.
- CHECK(Object(string).IsExternalOneByteString());
- return ExternalOneByteString::cast(Object(string)).GetChars();
+ CHECK(Object(string).IsExternalOneByteString(cage_base));
+ return ExternalOneByteString::cast(Object(string)).GetChars(cage_base);
}
const uint16_t* ExternalTwoByteStringGetChars(Address string) {
+ PtrComprCageBase cage_base = GetPtrComprCageBaseFromOnHeapAddress(string);
// The following CHECK is a workaround to prevent a CFI bug where
// ExternalOneByteStringGetChars() and ExternalTwoByteStringGetChars() are
// merged by the linker, resulting in one of the input type's vtable address
// failing the address range check.
// TODO(chromium:1160961): Consider removing the CHECK when CFI is fixed.
- CHECK(Object(string).IsExternalTwoByteString());
- return ExternalTwoByteString::cast(Object(string)).GetChars();
+ CHECK(Object(string).IsExternalTwoByteString(cage_base));
+ return ExternalTwoByteString::cast(Object(string)).GetChars(cage_base);
}
} // namespace
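The new cage_base plumbing in ExternalOneByteStringGetChars and ExternalTwoByteStringGetChars recovers the pointer-compression cage from the string's own on-heap address, so the helpers can decompress tagged fields without an Isolate. A hedged standalone sketch of that idea; the constant and names are illustrative, not V8's real definitions:

#include <cstdint>

// Illustrative only: assume a 4 GiB-aligned pointer-compression cage, so
// the upper bits of any on-heap address identify the cage it lives in.
constexpr uintptr_t kCageAlignment = uintptr_t{1} << 32;

struct PtrComprCageBase {
  uintptr_t base;
};

// Recover the cage base from an on-heap address by masking off the
// in-cage offset bits.
inline PtrComprCageBase CageBaseFromOnHeapAddress(uintptr_t address) {
  return {address & ~(kCageAlignment - 1)};
}

// Decompress a 32-bit tagged field back into a full pointer.
inline uintptr_t Decompress(PtrComprCageBase cage, uint32_t compressed) {
  return cage.base + compressed;
}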
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index ca62ff9d7a..fe80bef62a 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -72,6 +72,8 @@ class StatsCounter;
"RegExpStack::limit_address_address()") \
V(address_of_regexp_stack_memory_top_address, \
"RegExpStack::memory_top_address_address()") \
+ V(address_of_regexp_stack_stack_pointer, \
+ "RegExpStack::stack_pointer_address()") \
V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
V(thread_in_wasm_flag_address_address, \
"Isolate::thread_in_wasm_flag_address_address") \
@@ -328,6 +330,10 @@ class ExternalReference {
// ObjectPair f(v8::internal::Arguments).
BUILTIN_CALL_PAIR,
+ // TODO(mslekova): Once FAST_C_CALL is supported in the simulator,
+ // the following four specific types and their special handling
+ // can be removed, as the generic call supports them.
+
// Builtin that takes float arguments and returns an int.
// int f(double, double).
BUILTIN_COMPARE_CALL,
@@ -359,7 +365,11 @@ class ExternalReference {
// Call to accessor getter callback via InvokeAccessorGetterCallback.
// void f(Local<Name> property, PropertyCallbackInfo& info,
// AccessorNameGetterCallback callback)
- PROFILING_GETTER_CALL
+ PROFILING_GETTER_CALL,
+
+ // C call, either representing a fast API call or used in tests.
+ // Can have arbitrary signature from the types supported by the fast API.
+ FAST_C_CALL
};
#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
@@ -380,7 +390,8 @@ class ExternalReference {
static ExternalReference Create(const Runtime::Function* f);
static ExternalReference Create(IsolateAddressId id, Isolate* isolate);
static ExternalReference Create(Runtime::FunctionId id);
- static V8_EXPORT_PRIVATE ExternalReference Create(Address address);
+ static V8_EXPORT_PRIVATE ExternalReference
+ Create(Address address, Type type = ExternalReference::BUILTIN_CALL);
template <typename SubjectChar, typename PatternChar>
static ExternalReference search_string_raw();
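Passing a Type into ExternalReference::Create(Address, Type) matters mainly for simulator builds, where a call out to C++ goes through a redirection that must know how to marshal the arguments; FAST_C_CALL extends that to arbitrary fast-API signatures. A rough, hypothetical sketch of the redirection idea, not the simulator's actual code:

#include <cstdint>

enum class RefType { kBuiltinCall, kFastCCall };

// What a simulator-side redirection would need to remember: the real C
// entry point plus the signature class used to marshal arguments.
struct Redirection {
  uintptr_t external_function;
  RefType type;
};

inline uintptr_t Redirect(uintptr_t address, RefType type) {
  static Redirection table[64];
  static int used = 0;
  table[used] = {address, type};
  // Generated code would call this stand-in address; the simulator then
  // looks up the entry to dispatch with the right calling convention.
  return reinterpret_cast<uintptr_t>(&table[used++]);
}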
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index f4ff4914fb..18aa39461d 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -86,7 +86,7 @@ HeapObject RelocInfo::target_object() {
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
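On ia32 the new cage-base parameter is unused and target_object_no_host simply forwards to target_object(), which reads a pointer embedded unaligned in the instruction stream. A small sketch of the usual portable way to perform such a read (the general technique, not V8's exact helper):

#include <cstdint>
#include <cstring>

// Load a T from a possibly unaligned address without undefined behavior;
// compilers lower the memcpy to a plain (unaligned) load.
template <typename T>
T ReadUnalignedValue(const void* p) {
  T result;
  std::memcpy(&result, p, sizeof(T));
  return result;
}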
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index e921c11552..b5bbcee83f 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -2188,21 +2188,6 @@ void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5B);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
@@ -2211,13 +2196,6 @@ void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtps2pd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtpd2ps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2242,59 +2220,6 @@ void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::addsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::mulsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::subsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5C);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::divsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5E);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::rcpps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x53);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::sqrtps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x51);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::rsqrtps(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x52);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cmpps(XMMRegister dst, Operand src, uint8_t cmp) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2312,14 +2237,6 @@ void Assembler::cmppd(XMMRegister dst, Operand src, uint8_t cmp) {
EMIT(cmp);
}
-void Assembler::sqrtsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x51);
- emit_sse_operand(dst, src);
-}
-
void Assembler::haddps(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
@@ -2408,22 +2325,6 @@ void Assembler::pmovmskb(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-void Assembler::maxsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5F);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::minsd(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5D);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2969,16 +2870,12 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
-}
-
void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF3, k0F, kWIG);
}
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(op, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(op, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
@@ -2993,27 +2890,27 @@ void Assembler::vshufpd(XMMRegister dst, XMMRegister src1, Operand src2,
}
void Assembler::vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x12, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x16, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x12, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovlps(Operand dst, XMMRegister src) {
- vinstr(0x13, src, xmm0, dst, kNone, k0F, kWIG);
+ vinstr(0x13, src, xmm0, dst, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x16, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void Assembler::vmovhps(Operand dst, XMMRegister src) {
- vinstr(0x17, src, xmm0, dst, kNone, k0F, kWIG);
+ vinstr(0x17, src, xmm0, dst, kNoPrefix, k0F, kWIG);
}
void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, Operand src2,
@@ -3167,6 +3064,16 @@ void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(offset);
}
+void Assembler::vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+void Assembler::vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
void Assembler::vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
vinstr(0x08, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
@@ -3187,7 +3094,7 @@ void Assembler::vmovmskpd(Register dst, XMMRegister src) {
void Assembler::vmovmskps(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(xmm0, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(xmm0, kL128, kNoPrefix, k0F, kWIG);
EMIT(0x50);
emit_sse_operand(dst, src);
}
@@ -3212,7 +3119,7 @@ void Assembler::vpcmpgtq(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
void Assembler::bmi1(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(vreg, kLZ, kNone, k0F38, kW0);
+ emit_vex_prefix(vreg, kLZ, kNoPrefix, k0F38, kW0);
EMIT(op);
emit_operand(reg, rm);
}
@@ -3264,6 +3171,14 @@ void Assembler::rorx(Register dst, Operand src, byte imm8) {
EMIT(imm8);
}
+void Assembler::sse_instr(XMMRegister dst, Operand src, byte escape,
+ byte opcode) {
+ EnsureSpace ensure_space(this);
+ EMIT(escape);
+ EMIT(opcode);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::sse2_instr(XMMRegister dst, Operand src, byte prefix,
byte escape, byte opcode) {
EnsureSpace ensure_space(this);
@@ -3471,27 +3386,27 @@ void Assembler::emit_operand(XMMRegister reg, Operand adr) {
void Assembler::emit_operand(int code, Operand adr) {
// Isolate-independent code may not embed relocatable addresses.
- DCHECK(!options().isolate_independent_code ||
- adr.rmode_ != RelocInfo::CODE_TARGET);
- DCHECK(!options().isolate_independent_code ||
- adr.rmode_ != RelocInfo::FULL_EMBEDDED_OBJECT);
- DCHECK(!options().isolate_independent_code ||
- adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
-
- const unsigned length = adr.len_;
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ adr.rmode() != RelocInfo::CODE_TARGET);
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ adr.rmode() != RelocInfo::FULL_EMBEDDED_OBJECT);
+ DCHECK_IMPLIES(options().isolate_independent_code,
+ adr.rmode() != RelocInfo::EXTERNAL_REFERENCE);
+
+ const unsigned length = adr.encoded_bytes().length();
DCHECK_GT(length, 0);
// Emit updated ModRM byte containing the given register.
- EMIT((adr.buf_[0] & ~0x38) | (code << 3));
+ EMIT((adr.encoded_bytes()[0] & ~0x38) | (code << 3));
// Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) EMIT(adr.buf_[i]);
+ for (unsigned i = 1; i < length; i++) EMIT(adr.encoded_bytes()[i]);
// Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode())) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
- RecordRelocInfo(adr.rmode_);
- if (adr.rmode_ == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
+ RecordRelocInfo(adr.rmode());
+ if (adr.rmode() == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
emit_label(ReadUnalignedValue<Label*>(reinterpret_cast<Address>(pc_)));
} else {
pc_ += sizeof(int32_t);
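The emit_operand() assertions above are rewritten from DCHECK(!a || b) into DCHECK_IMPLIES(a, b), which states the same condition as an implication. A plain-C++ illustration of the equivalence, using assert rather than V8's macro:

#include <cassert>

// a => b is equivalent to (!a || b); spelling it as IMPLIES makes the
// intended precondition easier to read at the call site.
#define MY_DCHECK_IMPLIES(a, b) assert(!(a) || (b))

void CheckOperand(bool isolate_independent_code, bool rmode_is_code_target) {
  MY_DCHECK_IMPLIES(isolate_independent_code, !rmode_is_code_target);
}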
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 31fc2c0221..b099dfcdd3 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -269,6 +269,9 @@ class V8_EXPORT_PRIVATE Operand {
// register.
Register reg() const;
+ base::Vector<const byte> encoded_bytes() const { return {buf_, len_}; }
+ RelocInfo::Mode rmode() { return rmode_; }
+
private:
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@@ -298,9 +301,6 @@ class V8_EXPORT_PRIVATE Operand {
uint8_t len_ = 0;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_ = RelocInfo::NONE;
-
- // TODO(clemensb): Get rid of this friendship, or make Operand immutable.
- friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
@@ -899,12 +899,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void minss(XMMRegister dst, XMMRegister src) { minss(dst, Operand(src)); }
void minss(XMMRegister dst, Operand src);
- void rcpps(XMMRegister dst, Operand src);
- void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
- void sqrtps(XMMRegister dst, Operand src);
- void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); }
- void rsqrtps(XMMRegister dst, Operand src);
- void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
void haddps(XMMRegister dst, Operand src);
void haddps(XMMRegister dst, XMMRegister src) { haddps(dst, Operand(src)); }
void sqrtpd(XMMRegister dst, Operand src) {
@@ -961,16 +955,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvtss2sd(XMMRegister dst, XMMRegister src) {
cvtss2sd(dst, Operand(src));
}
- void cvtsd2ss(XMMRegister dst, Operand src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src) {
- cvtsd2ss(dst, Operand(src));
- }
- void cvtdq2ps(XMMRegister dst, XMMRegister src) {
- cvtdq2ps(dst, Operand(src));
- }
- void cvtdq2ps(XMMRegister dst, Operand src);
void cvtdq2pd(XMMRegister dst, XMMRegister src);
- void cvtps2pd(XMMRegister dst, XMMRegister src);
void cvtpd2ps(XMMRegister dst, XMMRegister src);
void cvttps2dq(XMMRegister dst, XMMRegister src) {
cvttps2dq(dst, Operand(src));
@@ -978,17 +963,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvttps2dq(XMMRegister dst, Operand src);
void cvttpd2dq(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, XMMRegister src) { addsd(dst, Operand(src)); }
- void addsd(XMMRegister dst, Operand src);
- void subsd(XMMRegister dst, XMMRegister src) { subsd(dst, Operand(src)); }
- void subsd(XMMRegister dst, Operand src);
- void mulsd(XMMRegister dst, XMMRegister src) { mulsd(dst, Operand(src)); }
- void mulsd(XMMRegister dst, Operand src);
- void divsd(XMMRegister dst, XMMRegister src) { divsd(dst, Operand(src)); }
- void divsd(XMMRegister dst, Operand src);
- void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); }
- void sqrtsd(XMMRegister dst, Operand src);
-
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, Operand src);
@@ -1010,11 +984,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
- void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
- void maxsd(XMMRegister dst, Operand src);
- void minsd(XMMRegister dst, XMMRegister src) { minsd(dst, Operand(src)); }
- void minsd(XMMRegister dst, Operand src);
-
void movdqa(XMMRegister dst, Operand src);
void movdqa(Operand dst, XMMRegister src);
void movdqa(XMMRegister dst, XMMRegister src);
@@ -1266,50 +1235,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
- void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vaddsd(dst, src1, Operand(src2));
- }
- void vaddsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x58, dst, src1, src2);
- }
- void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsubsd(dst, src1, Operand(src2));
- }
- void vsubsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5c, dst, src1, src2);
- }
- void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vmulsd(dst, src1, Operand(src2));
- }
- void vmulsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x59, dst, src1, src2);
- }
- void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vdivsd(dst, src1, Operand(src2));
- }
- void vdivsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5e, dst, src1, src2);
- }
- void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vmaxsd(dst, src1, Operand(src2));
- }
- void vmaxsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5f, dst, src1, src2);
- }
- void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vminsd(dst, src1, Operand(src2));
- }
- void vminsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x5d, dst, src1, src2);
- }
- void vsqrtsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsqrtsd(dst, src1, Operand(src2));
- }
- void vsqrtsd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vsd(0x51, dst, src1, src2);
- }
- void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
-
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddss(dst, src1, Operand(src2));
}
@@ -1354,20 +1279,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
- void vrcpps(XMMRegister dst, XMMRegister src) { vrcpps(dst, Operand(src)); }
- void vrcpps(XMMRegister dst, Operand src) {
- vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG);
- }
- void vsqrtps(XMMRegister dst, XMMRegister src) { vsqrtps(dst, Operand(src)); }
- void vsqrtps(XMMRegister dst, Operand src) {
- vinstr(0x51, dst, xmm0, src, kNone, k0F, kWIG);
- }
- void vrsqrtps(XMMRegister dst, XMMRegister src) {
- vrsqrtps(dst, Operand(src));
- }
- void vrsqrtps(XMMRegister dst, Operand src) {
- vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
- }
void vhaddps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vhaddps(dst, src1, Operand(src2));
}
@@ -1501,21 +1412,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
+ void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode);
+ void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode);
void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
- vcvtdq2ps(dst, Operand(src));
- }
- void vcvtdq2ps(XMMRegister dst, Operand src) {
- vinstr(0x5B, dst, xmm0, src, kNone, k0F, kWIG);
- }
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xE6, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vcvtps2pd(XMMRegister dst, XMMRegister src) {
- vinstr(0x5A, dst, xmm0, src, kNone, k0F, kWIG);
- }
void vcvtpd2ps(XMMRegister dst, XMMRegister src) {
vinstr(0x5A, dst, xmm0, src, k66, k0F, kWIG);
}
@@ -1528,6 +1434,28 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvttpd2dq(XMMRegister dst, XMMRegister src) {
vinstr(0xE6, dst, xmm0, src, k66, k0F, kWIG);
}
+ void vcvttsd2si(Register dst, XMMRegister src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+ }
+ void vcvttsd2si(Register dst, Operand src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+ }
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+ }
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
+ }
+ void vcvttss2si(Register dst, XMMRegister src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ }
+ void vcvttss2si(Register dst, Operand src) {
+ XMMRegister idst = XMMRegister::from_code(dst.code());
+ vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ }
void vmovddup(XMMRegister dst, Operand src) {
vinstr(0x12, dst, xmm0, src, kF2, k0F, kWIG);
@@ -1570,6 +1498,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vpmovmskb(Register dst, XMMRegister src);
+ void vucomisd(XMMRegister dst, XMMRegister src) {
+ vinstr(0x2E, dst, xmm0, src, k66, k0F, kWIG);
+ }
+ void vucomisd(XMMRegister dst, Operand src) {
+ vinstr(0x2E, dst, xmm0, src, k66, k0F, kWIG);
+ }
+ void vucomiss(XMMRegister dst, XMMRegister src) {
+ vinstr(0x2E, dst, xmm0, src, kNoPrefix, k0F, kWIG);
+ }
+ void vucomiss(XMMRegister dst, Operand src) {
+ vinstr(0x2E, dst, xmm0, src, kNoPrefix, k0F, kWIG);
+ }
+
// BMI instruction
void andn(Register dst, Register src1, Register src2) {
andn(dst, src1, Operand(src2));
@@ -1602,7 +1543,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bzhi(dst, Operand(src1), src2);
}
void bzhi(Register dst, Operand src1, Register src2) {
- bmi2(kNone, 0xf5, dst, src2, src1);
+ bmi2(kNoPrefix, 0xf5, dst, src2, src1);
}
void mulx(Register dst1, Register dst2, Register src) {
mulx(dst1, dst2, Operand(src));
@@ -1721,6 +1662,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef PACKED_CMP_LIST
// Other SSE and AVX instructions
+#define DECLARE_SSE_UNOP_AND_AVX(instruction, escape, opcode) \
+ void instruction(XMMRegister dst, XMMRegister src) { \
+ instruction(dst, Operand(src)); \
+ } \
+ void instruction(XMMRegister dst, Operand src) { \
+ sse_instr(dst, src, 0x##escape, 0x##opcode); \
+ } \
+ void v##instruction(XMMRegister dst, XMMRegister src) { \
+ v##instruction(dst, Operand(src)); \
+ } \
+ void v##instruction(XMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, xmm0, src, kNoPrefix, k##escape, kWIG); \
+ }
+
+ SSE_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AND_AVX)
+#undef DECLARE_SSE_UNOP_AND_AVX
+
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
instruction(dst, Operand(src)); \
@@ -1730,6 +1688,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
+ SSE2_INSTRUCTION_LIST_SD(DECLARE_SSE2_INSTRUCTION)
#undef DECLARE_SSE2_INSTRUCTION
#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
@@ -1741,6 +1700,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
+ SSE2_INSTRUCTION_LIST_SD(DECLARE_SSE2_AVX_INSTRUCTION)
#undef DECLARE_SSE2_AVX_INSTRUCTION
#define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2, \
@@ -1909,7 +1869,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit_farith(int b1, int b2, int i);
// Emit vex prefix
- enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+ enum SIMDPrefix { kNoPrefix = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
@@ -1928,6 +1888,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void emit_disp(Label* L, Displacement::Type type);
inline void emit_near_disp(Label* L);
+ void sse_instr(XMMRegister dst, Operand src, byte prefix, byte opcode);
void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
byte opcode);
void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
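Editor's note on the assembler-ia32.h hunks above: the hand-written SSE/AVX overload pairs are deleted in favour of X-macro instruction lists (declared later in this diff in sse-instr.h), and Operand now exposes encoded_bytes()/rmode() so Assembler no longer needs friend access. Below is a minimal standalone sketch of the X-macro pattern only; it is not V8 code, and the names FakeAssembler and UNOP_LIST are invented for illustration.

```cpp
// Sketch: one list of (name, opcode) entries expands into both the SSE form
// and the AVX "v"-prefixed form, so per-instruction overloads can be deleted.
#include <cstdint>
#include <iostream>

#define UNOP_LIST(V) \
  V(sqrtps, 0x51)    \
  V(rsqrtps, 0x52)   \
  V(rcpps, 0x53)

struct FakeAssembler {
#define DECLARE_UNOP(name, opcode)             \
  void name() { emit(opcode, /*avx=*/false); } \
  void v##name() { emit(opcode, /*avx=*/true); }
  UNOP_LIST(DECLARE_UNOP)
#undef DECLARE_UNOP

  void emit(uint8_t opcode, bool avx) {
    std::cout << (avx ? "AVX " : "SSE ") << std::hex << int(opcode) << "\n";
  }
};

int main() {
  FakeAssembler a;
  a.sqrtps();   // SSE 51
  a.vsqrtps();  // AVX 51
}
```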
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 3d22b9fbc8..e92fb3b5f7 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -158,16 +158,23 @@ void MacroAssembler::PushRoot(RootIndex index) {
}
}
-void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
- unsigned higher_limit, Register scratch,
- Label* on_in_range,
- Label::Distance near_jump) {
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Register scratch) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
if (lower_limit != 0) {
lea(scratch, Operand(value, 0u - lower_limit));
cmp(scratch, Immediate(higher_limit - lower_limit));
} else {
cmp(value, Immediate(higher_limit));
}
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Register scratch,
+ Label* on_in_range,
+ Label::Distance near_jump) {
+ CompareRange(value, lower_limit, higher_limit, scratch);
j(below_equal, on_in_range, near_jump);
}
@@ -199,7 +206,11 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
- // TODO(jgruber): Add support for enable_root_relative_access.
+ if (root_array_available() && options().enable_root_relative_access) {
+ intptr_t delta =
+ RootRegisterOffsetForExternalReference(isolate(), reference);
+ return Operand(kRootRegister, delta);
+ }
if (root_array_available() && options().isolate_independent_code) {
if (IsAddressableThroughRootRegister(isolate(), reference)) {
// Some external references can be efficiently loaded as an offset from
@@ -719,14 +730,15 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-void MacroAssembler::CmpInstanceTypeRange(Register map, Register scratch,
+void MacroAssembler::CmpInstanceTypeRange(Register map,
+ Register instance_type_out,
+ Register scratch,
InstanceType lower_limit,
InstanceType higher_limit) {
ASM_CODE_COMMENT(this);
DCHECK_LT(lower_limit, higher_limit);
- movzx_w(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- lea(scratch, Operand(scratch, 0u - lower_limit));
- cmp(scratch, Immediate(higher_limit - lower_limit));
+ movzx_w(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
+ CompareRange(instance_type_out, lower_limit, higher_limit, scratch);
}
void MacroAssembler::AssertSmi(Register object) {
@@ -758,7 +770,7 @@ void MacroAssembler::AssertFunction(Register object, Register scratch) {
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
LoadMap(object, object);
- CmpInstanceTypeRange(object, scratch, FIRST_JS_FUNCTION_TYPE,
+ CmpInstanceTypeRange(object, scratch, scratch, FIRST_JS_FUNCTION_TYPE,
LAST_JS_FUNCTION_TYPE);
Pop(object);
Check(below_equal, AbortReason::kOperandIsNotAFunction);
@@ -952,7 +964,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
bind(&check_offset);
cmp(bytes_scratch, kStackPageSize);
- j(greater, &touch_next_page);
+ j(greater_equal, &touch_next_page);
sub(esp, bytes_scratch);
}
@@ -960,7 +972,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
void TurboAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
- while (bytes > kStackPageSize) {
+ while (bytes >= kStackPageSize) {
sub(esp, Immediate(kStackPageSize));
mov(Operand(esp, 0), Immediate(0));
bytes -= kStackPageSize;
@@ -1243,8 +1255,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
- j(equal, &regular_invoke, Label::kFar);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ cmp(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
+ j(equal, &regular_invoke, Label::kFar);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1309,8 +1323,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
int3(); // This should be unreachable.
}
@@ -1322,7 +1336,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
ASM_CODE_COMMENT(this);
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1584,21 +1599,12 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
+void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+ uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
}
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrd(dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrd(dst, src, imm8);
- return;
- }
// Without AVX or SSE, we can only have 64-bit values in xmm registers.
// We don't have an xmm scratch register, so move the data via the stack. This
// path is rarely required, so it's acceptable to be slow.
@@ -1609,43 +1615,8 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
add(esp, Immediate(kDoubleSize));
}
-void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
- Pinsrb(dst, dst, src, imm8);
-}
-
-void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrb(dst, src1, src2, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pinsrb(dst, src2, imm8);
- return;
- }
- FATAL("no AVX or SSE4.1 support");
-}
-
-void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrd(dst, src1, src2, imm8);
- return;
- }
- if (dst != src1) {
- movaps(dst, src1);
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pinsrd(dst, src2, imm8);
- return;
- }
+void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
// Without AVX or SSE, we can only have 64-bit values in xmm registers.
// We don't have an xmm scratch register, so move the data via the stack. This
// path is rarely required, so it's acceptable to be slow.
@@ -1654,10 +1625,10 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
// Write original content of {dst} to the stack.
movsd(Operand(esp, 0), dst);
// Overwrite the portion specified in {imm8}.
- if (src2.is_reg_only()) {
- mov(Operand(esp, imm8 * kUInt32Size), src2.reg());
+ if (src.is_reg_only()) {
+ mov(Operand(esp, imm8 * kUInt32Size), src.reg());
} else {
- movss(dst, src2);
+ movss(dst, src);
movss(Operand(esp, imm8 * kUInt32Size), dst);
}
// Load back the full value into {dst}.
@@ -1665,29 +1636,6 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
add(esp, Immediate(kDoubleSize));
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
- Pinsrd(dst, dst, src, imm8);
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
- Pinsrw(dst, dst, src, imm8);
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
- int8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpinsrw(dst, src1, src2, imm8);
- return;
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pinsrw(dst, src2, imm8);
- return;
- }
-}
-
void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -1808,7 +1756,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(1, eax);
mov(Operand(esp, 0), Immediate(static_cast<int>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -1821,7 +1769,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
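Editor's note on the new CompareRange helper factored out of JumpIfIsInRange and CmpInstanceTypeRange above: the lea/cmp pair it emits is the classic single-comparison range check. A hedged standalone sketch of the arithmetic (plain C++, not the assembler code):

```cpp
// Subtracting the lower bound lets one unsigned compare cover both ends of
// [lower, higher]; if value < lower the subtraction wraps to a huge number.
#include <cassert>

bool InRange(unsigned value, unsigned lower, unsigned higher) {
  return (value - lower) <= (higher - lower);
}

int main() {
  assert(InRange(5, 3, 9));
  assert(!InRange(2, 3, 9));
  assert(!InRange(10, 3, 9));
  assert(InRange(3, 3, 9) && InRange(9, 3, 9));  // bounds are inclusive
}
```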
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index bf8f356e8c..ce02c0e294 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -199,6 +199,8 @@ class V8_EXPORT_PRIVATE TurboAssembler
SmiUntag(output);
}
+ void SmiToInt32(Register reg) { SmiUntag(reg); }
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
@@ -302,57 +304,13 @@ class V8_EXPORT_PRIVATE TurboAssembler
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- // Defined here because some callers take a pointer to member functions.
- AVX_OP(Pcmpeqb, pcmpeqb)
- AVX_OP(Pcmpeqw, pcmpeqw)
- AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
-
-// Macro for instructions that have 2 operands for AVX version and 1 operand for
-// SSE version. Will move src1 to dst if dst != src1.
-#define AVX_OP3_WITH_MOVE(macro_name, name, dst_type, src_type) \
- void macro_name(dst_type dst, dst_type src1, src_type src2) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src1, src2); \
- } else { \
- if (dst != src1) { \
- movaps(dst, src1); \
- } \
- name(dst, src2); \
- } \
- }
- AVX_OP3_WITH_MOVE(Movlps, movlps, XMMRegister, Operand)
- AVX_OP3_WITH_MOVE(Movhps, movhps, XMMRegister, Operand)
-#undef AVX_OP3_WITH_MOVE
-
- // TODO(zhin): Remove after moving more definitions into SharedTurboAssembler.
- void Movlps(Operand dst, XMMRegister src) {
- SharedTurboAssembler::Movlps(dst, src);
- }
- void Movhps(Operand dst, XMMRegister src) {
- SharedTurboAssembler::Movhps(dst, src);
- }
-
- void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
- void Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
- Pinsrb(dst, Operand(src), imm8);
- }
- void Pinsrb(XMMRegister dst, Operand src, int8_t imm8);
- // Moves src1 to dst if AVX is not supported.
- void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
- void Pinsrd(XMMRegister dst, Register src, uint8_t imm8) {
- Pinsrd(dst, Operand(src), imm8);
- }
- void Pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
- // Moves src1 to dst if AVX is not supported.
- void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, Register src, int8_t imm8) {
- Pinsrw(dst, Operand(src), imm8);
+ void PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8);
+ void PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ PinsrdPreSse41(dst, Operand(src), imm8, load_pc_offset);
}
- void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
- // Moves src1 to dst if AVX is not supported.
- void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
+ void PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+ uint32_t* load_pc_offset);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
@@ -477,7 +435,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Checks if value is in range [lower_limit, higher_limit] using a single
- // comparison.
+ // comparison. Flags CF=1 or ZF=1 indicate the value is in the range
+ // (condition below_equal). It is valid, that |value| == |scratch| as far as
+ // this function is concerned.
+ void CompareRange(Register value, unsigned lower_limit, unsigned higher_limit,
+ Register scratch);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Register scratch,
Label* on_in_range,
@@ -561,8 +523,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
//
// Always use unsigned comparisons: below_equal for a positive
// result.
- void CmpInstanceTypeRange(Register map, Register scratch,
- InstanceType lower_limit,
+ void CmpInstanceTypeRange(Register map, Register instance_type_out,
+ Register scratch, InstanceType lower_limit,
InstanceType higher_limit);
// Smi tagging support.
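Editor's note on PinsrdPreSse41 above: without SSE4.1 or AVX there is no direct lane-insert instruction, so the fallback spills the 64-bit xmm value to the stack, overwrites one 32-bit lane in memory, and reloads the whole value. A toy stand-in for that round-trip (assumes little-endian lane order, as on ia32; not the actual assembler code):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

uint64_t InsertLane(uint64_t vec, uint32_t lane_value, int imm8) {
  uint32_t lanes[2];
  std::memcpy(lanes, &vec, sizeof(vec));   // "movsd [esp], dst"
  lanes[imm8] = lane_value;                // "mov [esp + imm8*4], src"
  std::memcpy(&vec, lanes, sizeof(vec));   // "movsd dst, [esp]"
  return vec;
}

int main() {
  uint64_t v = 0x1111111122222222ull;
  assert(InsertLane(v, 0xdeadbeef, 1) == 0xdeadbeef22222222ull);
}
```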
diff --git a/deps/v8/src/codegen/ia32/sse-instr.h b/deps/v8/src/codegen/ia32/sse-instr.h
index ef81e1014f..ec630dfa9d 100644
--- a/deps/v8/src/codegen/ia32/sse-instr.h
+++ b/deps/v8/src/codegen/ia32/sse-instr.h
@@ -5,6 +5,14 @@
#ifndef V8_CODEGEN_IA32_SSE_INSTR_H_
#define V8_CODEGEN_IA32_SSE_INSTR_H_
+// SSE/SSE2 instructions whose AVX version has two operands.
+#define SSE_UNOP_INSTRUCTION_LIST(V) \
+ V(sqrtps, 0F, 51) \
+ V(rsqrtps, 0F, 52) \
+ V(rcpps, 0F, 53) \
+ V(cvtps2pd, 0F, 5A) \
+ V(cvtdq2ps, 0F, 5B)
+
#define SSE2_INSTRUCTION_LIST(V) \
V(packsswb, 66, 0F, 63) \
V(packssdw, 66, 0F, 6B) \
@@ -63,6 +71,17 @@
V(punpckhqdq, 66, 0F, 6D) \
V(pxor, 66, 0F, EF)
+// Instructions dealing with scalar double-precision values.
+#define SSE2_INSTRUCTION_LIST_SD(V) \
+ V(sqrtsd, F2, 0F, 51) \
+ V(addsd, F2, 0F, 58) \
+ V(mulsd, F2, 0F, 59) \
+ V(cvtsd2ss, F2, 0F, 5A) \
+ V(subsd, F2, 0F, 5C) \
+ V(minsd, F2, 0F, 5D) \
+ V(divsd, F2, 0F, 5E) \
+ V(maxsd, F2, 0F, 5F)
+
#define SSSE3_INSTRUCTION_LIST(V) \
V(pshufb, 66, 0F, 38, 00) \
V(phaddw, 66, 0F, 38, 01) \
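Editor's note on the new SSE2_INSTRUCTION_LIST_SD entries above: each tuple is (mnemonic, mandatory prefix, escape byte, opcode), e.g. V(sqrtsd, F2, 0F, 51) encodes as F2 0F 51 followed by a ModRM byte. A toy encoder for the register-register form only, with made-up helper names, just to show how the table maps to bytes:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> EncodeSse2Sd(uint8_t prefix, uint8_t escape,
                                  uint8_t opcode, int dst, int src) {
  uint8_t modrm = 0xC0 | (dst << 3) | src;  // mod=11: register-direct operands
  return {prefix, escape, opcode, modrm};
}

int main() {
  // sqrtsd xmm1, xmm2  ->  F2 0F 51 CA
  for (uint8_t b : EncodeSse2Sd(0xF2, 0x0F, 0x51, 1, 2)) std::printf("%02X ", b);
  std::printf("\n");
}
```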
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
index 597d5e048e..eb7cf3d398 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
+++ b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
@@ -95,7 +95,7 @@ HeapObject RelocInfo::target_object() {
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index 1710c40051..6c1fa8e729 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -3030,8 +3030,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
@@ -3044,7 +3044,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register actual_parameter_count) {
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(t0, actual_parameter_count);
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -3392,7 +3393,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(0, a0);
li(a0, Operand(static_cast<int>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -3405,7 +3406,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -3659,7 +3660,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
@@ -3669,7 +3670,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
index ef670fd1cd..c34a5bc18e 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -420,6 +420,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+ // On LoongArch64, we should sign-extend 32-bit values.
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -996,10 +1009,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label);
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index 02fa1cf3f9..448807b20e 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -94,13 +94,13 @@ class V8_NODISCARD FrameScope {
type_(type),
old_has_frame_(tasm->has_frame()) {
tasm->set_has_frame(true);
- if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
tasm->EnterFrame(type);
}
}
~FrameScope() {
- if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
tasm_->LeaveFrame(type_);
}
tasm_->set_has_frame(old_has_frame_);
@@ -110,8 +110,8 @@ class V8_NODISCARD FrameScope {
#ifdef V8_CODE_COMMENTS
const char* frame_name(StackFrame::Type type) {
switch (type) {
- case StackFrame::NONE:
- return "Frame: NONE";
+ case StackFrame::NO_FRAME_TYPE:
+ return "Frame: NO_FRAME_TYPE";
case StackFrame::MANUAL:
return "Frame: MANUAL";
#define FRAME_TYPE_CASE(type, field) \
@@ -145,7 +145,7 @@ class V8_NODISCARD FrameAndConstantPoolScope {
if (FLAG_enable_embedded_constant_pool) {
masm->set_constant_pool_available(true);
}
- if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
masm->EnterFrame(type, !old_constant_pool_available_);
}
}
@@ -194,7 +194,7 @@ class V8_NODISCARD ConstantPoolUnavailableScope {
class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope {
public:
explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
- : FrameScope(masm, StackFrame::NONE) {}
+ : FrameScope(masm, StackFrame::NO_FRAME_TYPE) {}
};
// Prevent the use of the RootArray during the lifetime of this
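Editor's note on the StackFrame::NONE to NO_FRAME_TYPE rename above: FrameScope's behaviour is unchanged, only the enumerator name moves. A toy RAII sketch of that behaviour (not V8 code): MANUAL and NO_FRAME_TYPE only toggle the has_frame flag, every other type also emits EnterFrame/LeaveFrame.

```cpp
#include <iostream>

enum class FrameType { kNoFrameType, kManual, kInternal };

struct Masm {
  bool has_frame = false;
  void EnterFrame() { std::cout << "EnterFrame\n"; }
  void LeaveFrame() { std::cout << "LeaveFrame\n"; }
};

class FrameScope {
 public:
  FrameScope(Masm* masm, FrameType type)
      : masm_(masm), type_(type), old_has_frame_(masm->has_frame) {
    masm_->has_frame = true;
    if (type_ != FrameType::kManual && type_ != FrameType::kNoFrameType)
      masm_->EnterFrame();
  }
  ~FrameScope() {
    if (type_ != FrameType::kManual && type_ != FrameType::kNoFrameType)
      masm_->LeaveFrame();
    masm_->has_frame = old_has_frame_;
  }

 private:
  Masm* masm_;
  FrameType type_;
  bool old_has_frame_;
};

int main() {
  Masm m;
  { FrameScope claim_only(&m, FrameType::kNoFrameType); }  // no frame emitted
  { FrameScope real(&m, FrameType::kInternal); }           // Enter/LeaveFrame
}
```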
diff --git a/deps/v8/src/codegen/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h
index d00da6efba..ea983668a4 100644
--- a/deps/v8/src/codegen/mips/assembler-mips-inl.h
+++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h
@@ -166,7 +166,7 @@ HeapObject RelocInfo::target_object() {
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index d30e5ee2e1..e7bb08ef26 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -4397,8 +4397,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
@@ -4418,8 +4418,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(t0, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -4745,7 +4745,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(0, a0);
li(a0, Operand(static_cast<int>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -4758,7 +4758,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index f467f83bd0..ba1e94ac92 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -460,6 +460,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
+ void SmiToInt32(Register smi) { SmiUntag(smi); }
+
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
index 2924b661f2..aaced78154 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
@@ -145,7 +145,7 @@ HeapObject RelocInfo::target_object() {
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
return target_object();
}
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 3cf1ec9fd2..28fd588a7c 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -4921,8 +4921,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
@@ -4943,8 +4943,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(t0, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -5276,7 +5276,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(0, a0);
li(a0, Operand(static_cast<int>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -5289,7 +5289,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -5546,7 +5546,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
@@ -5556,7 +5556,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index a0ebe35a93..a42fe1a6d0 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -485,6 +485,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+ // On MIPS64, we should sign-extend 32-bit values.
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1183,10 +1196,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfNotSmi(Register value, Label* not_smi_label,
BranchDelaySlot bd = PROTECT);
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
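Editor's note on the SmiToInt32 helpers added for the 64-bit ports above: with SmiValuesAre32Bits() the payload sits in the upper half of the word, so untagging is an arithmetic right shift by 32 (hence the comment about sign extension). A standalone sketch of that layout, not the V8 macros themselves:

```cpp
#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;

int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << kSmiShift; }
int32_t SmiToInt32(int64_t smi) { return static_cast<int32_t>(smi >> kSmiShift); }

int main() {
  assert(SmiToInt32(SmiTag(42)) == 42);
  assert(SmiToInt32(SmiTag(-7)) == -7);  // arithmetic shift preserves the sign
  assert(SmiTag(1) == (int64_t{1} << 32));
}
```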
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index d8cd524451..a4917192d8 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -159,10 +159,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 693f13d43e..4f17f08969 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -64,7 +64,7 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
// Used to encode a boolean value when emitting 32 bit
// opcodes which will indicate the presence of function descriptors
-constexpr int kHasFunctionDescriptorBitShift = 9;
+constexpr int kHasFunctionDescriptorBitShift = 4;
constexpr int kHasFunctionDescriptorBitMask = 1
<< kHasFunctionDescriptorBitShift;
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index a1ba6f8ddf..aa36511a55 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -1474,9 +1474,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- mov(r0, Operand(kDontAdaptArgumentsSentinel));
- CmpS64(expected_parameter_count, r0);
- beq(&regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ mov(r0, Operand(kDontAdaptArgumentsSentinel));
+ CmpS64(expected_parameter_count, r0);
+ beq(&regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1521,8 +1523,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1546,8 +1548,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(r7, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1702,16 +1704,28 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
cmpi(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (lower_limit != 0) {
+ mov(scratch, Operand(lower_limit));
+ sub(scratch, value, scratch);
+ cmpli(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ mov(scratch, Operand(higher_limit));
+ CmpU64(value, scratch);
+ }
+}
+
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- mov(scratch, Operand(lower_limit));
- sub(scratch, type_reg, scratch);
- cmpli(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
@@ -1898,15 +1912,7 @@ void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs,
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
- Register scratch = r0;
- if (lower_limit != 0) {
- mov(scratch, Operand(lower_limit));
- sub(scratch, value, scratch);
- cmpli(scratch, Operand(higher_limit - lower_limit));
- } else {
- mov(scratch, Operand(higher_limit));
- CmpU64(value, scratch);
- }
+ CompareRange(value, lower_limit, higher_limit);
ble(on_in_range);
}
@@ -2080,7 +2086,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
mov(r3, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, r4);
CallCFunction(ExternalReference::abort_with_reason(), 1);
@@ -2093,7 +2099,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2115,7 +2121,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
@@ -2123,7 +2129,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
@@ -3059,6 +3065,11 @@ void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs,
frsp(dst, dst, r);
}
+void TurboAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs, RCBit r) {
+ fcpsgn(dst, rhs, lhs, r);
+}
+
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
@@ -3111,7 +3122,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
{ \
- int64_t offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
\
if (mem.rb() == no_reg) { \
if (!is_int16(offset)) { \
@@ -3140,7 +3151,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
{ \
- int64_t offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
int misaligned = (offset & 3); \
\
if (mem.rb() == no_reg) { \
@@ -3229,7 +3240,10 @@ void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
#define GenerateMemoryLEOperation(reg, mem, op) \
{ \
if (mem.offset() == 0) { \
- op(reg, mem); \
+ if (mem.rb() != no_reg) \
+ op(reg, mem); \
+ else \
+ op(reg, MemOperand(r0, mem.ra())); \
} else if (is_int16(mem.offset())) { \
if (mem.rb() != no_reg) \
addi(scratch, mem.rb(), Operand(mem.offset())); \
@@ -3508,6 +3522,27 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
addi(sp, sp, Operand(2 * kSimd128Size));
}
+void TurboAssembler::ByteReverseU16(Register dst, Register val) {
+ subi(sp, sp, Operand(kSystemPointerSize));
+ sth(val, MemOperand(sp));
+ lhbrx(dst, MemOperand(r0, sp));
+ addi(sp, sp, Operand(kSystemPointerSize));
+}
+
+void TurboAssembler::ByteReverseU32(Register dst, Register val) {
+ subi(sp, sp, Operand(kSystemPointerSize));
+ stw(val, MemOperand(sp));
+ lwbrx(dst, MemOperand(r0, sp));
+ addi(sp, sp, Operand(kSystemPointerSize));
+}
+
+void TurboAssembler::ByteReverseU64(Register dst, Register val) {
+ subi(sp, sp, Operand(kSystemPointerSize));
+ std(val, MemOperand(sp));
+ ldbrx(dst, MemOperand(r0, sp));
+ addi(sp, sp, Operand(kSystemPointerSize));
+}
+
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CmpS64(x, Operand(y), r0);
beq(dest);
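Editor's note on the ByteReverseU16/U32/U64 helpers added above: the PPC sequence stores the value to the stack and reloads it with a byte-reversed load (lhbrx/lwbrx/ldbrx), which amounts to a plain byte swap. A portable sketch of the value they compute (not the emitted code):

```cpp
#include <cassert>
#include <cstdint>

uint32_t ByteReverseU32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
         ((v << 8) & 0x00FF0000u) | (v << 24);
}

uint64_t ByteReverseU64(uint64_t v) {
  return (static_cast<uint64_t>(ByteReverseU32(static_cast<uint32_t>(v))) << 32) |
         ByteReverseU32(static_cast<uint32_t>(v >> 32));
}

int main() {
  assert(ByteReverseU32(0x11223344u) == 0x44332211u);
  assert(ByteReverseU64(0x1122334455667788ull) == 0x8877665544332211ull);
}
```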
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 4a28b88384..81763f13f6 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -280,6 +280,174 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
RCBit r = LeaveRC);
void DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
RCBit r = LeaveRC);
+ void CopySignF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs,
+ RCBit r = LeaveRC);
+
+ template <class _type>
+ void SignedExtend(Register dst, Register value) {
+ switch (sizeof(_type)) {
+ case 1:
+ extsb(dst, value);
+ break;
+ case 2:
+ extsh(dst, value);
+ break;
+ case 4:
+ extsw(dst, value);
+ break;
+ case 8:
+ if (dst != value) mr(dst, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ template <class _type>
+ void ZeroExtend(Register dst, Register value) {
+ switch (sizeof(_type)) {
+ case 1:
+ ZeroExtByte(dst, value);
+ break;
+ case 2:
+ ZeroExtHalfWord(dst, value);
+ break;
+ case 4:
+ ZeroExtWord32(dst, value);
+ break;
+ case 8:
+ if (dst != value) mr(dst, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ template <class _type>
+ void ExtendValue(Register dst, Register value) {
+ if (std::is_signed<_type>::value) {
+ SignedExtend<_type>(dst, value);
+ } else {
+ ZeroExtend<_type>(dst, value);
+ }
+ }
+
+ template <class _type>
+ void LoadReserve(Register output, MemOperand dst) {
+ switch (sizeof(_type)) {
+ case 1:
+ lbarx(output, dst);
+ break;
+ case 2:
+ lharx(output, dst);
+ break;
+ case 4:
+ lwarx(output, dst);
+ break;
+ case 8:
+ ldarx(output, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (std::is_signed<_type>::value) {
+ SignedExtend<_type>(output, output);
+ }
+ }
+
+ template <class _type>
+ void StoreConditional(Register value, MemOperand dst) {
+ switch (sizeof(_type)) {
+ case 1:
+ stbcx(value, dst);
+ break;
+ case 2:
+ sthcx(value, dst);
+ break;
+ case 4:
+ stwcx(value, dst);
+ break;
+ case 8:
+ stdcx(value, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ template <class _type>
+ void AtomicCompareExchange(MemOperand dst, Register old_value,
+ Register new_value, Register output,
+ Register scratch) {
+ Label loop;
+ Label exit;
+ if (sizeof(_type) != 8) {
+ ExtendValue<_type>(scratch, old_value);
+ old_value = scratch;
+ }
+ lwsync();
+ bind(&loop);
+ LoadReserve<_type>(output, dst);
+ cmp(output, old_value, cr0);
+ bne(&exit, cr0);
+ StoreConditional<_type>(new_value, dst);
+ bne(&loop, cr0);
+ bind(&exit);
+ sync();
+ }
+
+ template <class _type>
+ void AtomicExchange(MemOperand dst, Register new_value, Register output) {
+ Label exchange;
+ lwsync();
+ bind(&exchange);
+ LoadReserve<_type>(output, dst);
+ StoreConditional<_type>(new_value, dst);
+ bne(&exchange, cr0);
+ sync();
+ }
+
+ template <class _type, class bin_op>
+ void AtomicOps(MemOperand dst, Register value, Register output,
+ Register result, bin_op op) {
+ Label binop;
+ lwsync();
+ bind(&binop);
+ switch (sizeof(_type)) {
+ case 1:
+ lbarx(output, dst);
+ break;
+ case 2:
+ lharx(output, dst);
+ break;
+ case 4:
+ lwarx(output, dst);
+ break;
+ case 8:
+ ldarx(output, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ op(result, output, value);
+ switch (sizeof(_type)) {
+ case 1:
+ stbcx(result, dst);
+ break;
+ case 2:
+ sthcx(result, dst);
+ break;
+ case 4:
+ stwcx(result, dst);
+ break;
+ case 8:
+ stdcx(result, dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ bne(&binop, cr0);
+ sync();
+ }
void Push(Register src) { push(src); }
// Push a handle.
@@ -431,6 +599,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Simd128Register scratch);
void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
+ void ByteReverseU16(Register dst, Register val);
+ void ByteReverseU32(Register dst, Register val);
+ void ByteReverseU64(Register dst, Register val);
+
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
@@ -595,6 +767,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
ShiftRightS64(dst, src, Operand(kSmiShift), rc);
}
}
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
void ZeroExtByte(Register dst, Register src);
void ZeroExtHalfWord(Register dst, Register src);
@@ -1023,6 +1206,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
@@ -1109,10 +1294,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bne(not_smi_label, cr0);
}
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
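Editor's note on the templated AtomicOps/AtomicExchange helpers added to the PPC TurboAssembler above: they are load-reserve/store-conditional retry loops (lwarx/stwcx. plus a branch back on failure). As a rough portable analogy only, the same retry shape expressed with std::atomic; this is not what the macro assembler emits:

```cpp
#include <atomic>
#include <cassert>
#include <functional>

template <class T, class BinOp>
T AtomicOps(std::atomic<T>& dst, T value, BinOp op) {
  T old = dst.load();
  T result;
  do {
    result = op(old, value);  // "op(result, output, value)"
  } while (!dst.compare_exchange_weak(old, result));  // "stwcx. ... bne &binop"
  return old;  // previous value, like the output register in the PPC loop
}

int main() {
  std::atomic<int> x{40};
  int previous = AtomicOps(x, 2, std::plus<int>());
  assert(previous == 40 && x.load() == 42);
}
```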
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index 918c93b13f..2479a926e3 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -255,8 +255,9 @@ class RelocInfo {
V8_INLINE HeapObject target_object();
// In GC operations, we don't have a host_ pointer. Retrieving a target
- // for COMPRESSED_EMBEDDED_OBJECT mode requires an isolate.
- V8_INLINE HeapObject target_object_no_host(Isolate* isolate);
+ // for COMPRESSED_EMBEDDED_OBJECT mode requires a pointer compression cage
+ // base value.
+ V8_INLINE HeapObject target_object_no_host(PtrComprCageBase cage_base);
V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
V8_INLINE void set_target_object(
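Editor's note on the signature change above: target_object_no_host now takes a PtrComprCageBase because decompressing a COMPRESSED_EMBEDDED_OBJECT reference only needs the cage base address, not a full Isolate. A toy model of that decompression, with invented names, not the real DecompressTaggedAny:

```cpp
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

struct PtrComprCageBase {
  Address base;  // aligned start of the pointer-compression cage
};

Address DecompressTagged(PtrComprCageBase cage_base, uint32_t compressed) {
  return cage_base.base + compressed;  // zero-extend the 32-bit offset and add
}

int main() {
  PtrComprCageBase cage{0x400000000000ull};
  assert(DecompressTagged(cage, 0x1234u) == 0x400000001234ull);
}
```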
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
index e3ac9b83f4..be3e59c7e4 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -170,10 +170,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 8cad060a47..47479cd016 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -576,7 +576,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal,
instr_at_put(pos, instr);
instr_at_put(pos + 4, kNopByte);
} else {
- DCHECK(is_int32(offset));
+ CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
@@ -703,7 +703,7 @@ void Assembler::next(Label* L, bool is_internal) {
if (link == kEndOfChain) {
L->Unuse();
} else {
- DCHECK_GT(link, 0);
+ DCHECK_GE(link, 0);
DEBUG_PRINTF("next: %p to %p (%d)\n", L,
reinterpret_cast<Instr*>(buffer_start_ + link), link);
L->link_to(link);
@@ -766,9 +766,9 @@ int Assembler::PatchBranchlongOffset(Address pc, Instr instr_auipc,
Instr instr_jalr, int32_t offset) {
DCHECK(IsAuipc(instr_auipc));
DCHECK(IsJalr(instr_jalr));
+ CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
- CHECK(is_int32(offset));
instr_at_put(pc, SetAuipcOffset(Hi20, instr_auipc));
instr_at_put(pc + 4, SetJalrOffset(Lo12, instr_jalr));
DCHECK(offset ==
@@ -1151,6 +1151,16 @@ void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
((vs2.code() & 0x1F) << kRvvVs2Shift);
emit(instr);
}
+
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ int8_t vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1 & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
// OPMVV OPFVV
void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
VRegister vs1, VRegister vs2, MaskType mask) {
@@ -1162,10 +1172,10 @@ void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
emit(instr);
}
-// OPIVX OPFVF OPMVX
+// OPIVX OPMVX
void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
Register rs1, VRegister vs2, MaskType mask) {
- DCHECK(opcode == OP_IVX || opcode == OP_FVF || opcode == OP_MVX);
+ DCHECK(opcode == OP_IVX || opcode == OP_MVX);
Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
((vd.code() & 0x1F) << kRvvVdShift) |
((rs1.code() & 0x1F) << kRvvRs1Shift) |
@@ -1173,6 +1183,17 @@ void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
emit(instr);
}
+// OPFVF
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ FPURegister fs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_FVF);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((fs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
// OPMVX
void Assembler::GenInstrV(uint8_t funct6, Register rd, Register rs1,
VRegister vs2, MaskType mask) {
@@ -2485,12 +2506,37 @@ void Assembler::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
}
+void Assembler::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ DCHECK_NE(vd, vs1);
+ DCHECK_NE(vd, vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
+}
+
+void Assembler::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
+ MaskType mask) {
+ DCHECK_NE(vd, vs2);
+ GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
+}
+
+void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+ DCHECK_NE(vd, vs2);
+ GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
+}
+
#define DEFINE_OPIVV(name, funct6) \
void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
}
+#define DEFINE_OPFVV(name, funct6) \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
#define DEFINE_OPIVX(name, funct6) \
void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
MaskType mask) { \
@@ -2509,6 +2555,12 @@ void Assembler::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
}
+#define DEFINE_OPFVF(name, funct6) \
+ void Assembler::name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
+ }
+
DEFINE_OPIVV(vadd, VADD_FUNCT6)
DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
@@ -2517,9 +2569,9 @@ DEFINE_OPIVX(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
-DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
-DEFINE_OPIVV(vsaddu, VSADD_FUNCT6)
-DEFINE_OPIVI(vsaddu, VSADD_FUNCT6)
+DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
@@ -2543,9 +2595,6 @@ DEFINE_OPIVI(vor, VOR_FUNCT6)
DEFINE_OPIVV(vxor, VXOR_FUNCT6)
DEFINE_OPIVX(vxor, VXOR_FUNCT6)
DEFINE_OPIVI(vxor, VXOR_FUNCT6)
-DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
-DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
-DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
@@ -2592,9 +2641,33 @@ DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+
+DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
+DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+
+DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
+#undef DEFINE_OPFVV
+#undef DEFINE_OPFVF
void Assembler::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
TailAgnosticType tail, MaskAgnosticType mask) {
@@ -3584,7 +3657,7 @@ void Assembler::CheckTrampolinePool() {
for (int i = 0; i < unbound_labels_count_; i++) {
int64_t imm64;
imm64 = branch_long_offset(&after_pool);
- DCHECK(is_int32(imm64));
+ CHECK(is_int32(imm64 + 0x800));
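+      // The +0x800 accounts for the rounding used when splitting the offset
+      // into Hi20/Lo12 below: Lo12 is sign-extended, so Hi20 is rounded up
+      // when Lo12 is negative, and the adjusted value must still fit in an
+      // int32 for the auipc/jalr pair to reconstruct it.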
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
auipc(t6, Hi20); // Read PC + Hi20 into t6
@@ -3628,7 +3701,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
int64_t imm = (int64_t)target - (int64_t)pc;
Instr instr = instr_at(pc);
Instr instr1 = instr_at(pc + 1 * kInstrSize);
- DCHECK(is_int32(imm));
+ DCHECK(is_int32(imm + 0x800));
int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc, num * kInstrSize);
@@ -3830,9 +3903,9 @@ void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
int32_t distance = static_cast<int32_t>(
reinterpret_cast<Address>(entry_offset) -
reinterpret_cast<Address>(assm_->toAddress(load_offset)));
+ CHECK(is_int32(distance + 0x800));
int32_t Hi20 = (((int32_t)distance + 0x800) >> 12);
int32_t Lo12 = (int32_t)distance << 20 >> 20;
- CHECK(is_int32(distance));
assm_->instr_at_put(load_offset, SetAuipcOffset(Hi20, instr_auipc));
assm_->instr_at_put(load_offset + 4, SetLdOffset(Lo12, instr_ld));
}
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 7da77f8e0e..e30254aa65 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -358,11 +358,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// invalidated. For instance, when the assembler buffer grows or a GC happens
// between Code object allocation and Code object finalization.
void FixOnHeapReferences(bool update_embedded_objects = true);
-
// This function is called when we fall back from on-heap to off-heap
// compilation and patch on-heap references to handles.
void FixOnHeapReferencesToHandles();
-
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
@@ -775,6 +773,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
MaskType mask = NoMask);
+#define DEFINE_OPFVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPFVF(name, funct6) \
+ void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
+ MaskType mask = NoMask);
+
DEFINE_OPIVV(vadd, VADD_FUNCT6)
DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
@@ -784,8 +790,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
- DEFINE_OPIVV(vsaddu, VSADD_FUNCT6)
- DEFINE_OPIVI(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
+ DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
@@ -858,15 +864,58 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+
+ DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
+ DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
+ DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
+ DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
+ DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
+
+ DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
+ DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
+ DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
+ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
+  DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
+  DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+
+ DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
+ DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
+ DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
+ DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
+
#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
#undef DEFINE_OPMVV
#undef DEFINE_OPMVX
+#undef DEFINE_OPFVV
+#undef DEFINE_OPFVF
+
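+// Vector floating-point unary ops (VFUNARY0/VFUNARY1): the concrete operation
+// is selected by the value emitted in the vs1 field.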
+#define DEFINE_VFUNARY(name, funct6, vs1) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
+ DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V)
+ DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
+ DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
+ DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
+ DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
+
+ DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
+#undef DEFINE_VFUNARY
void vnot_vv(VRegister dst, VRegister src) { vxor_vi(dst, src, -1); }
void vneg_vv(VRegister dst, VRegister src) { vrsub_vx(dst, src, zero_reg); }
+
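+  // Pseudo-instructions: floating-point negate/abs via sign-injection with
+  // the source used as both operands.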
+ void vfneg_vv(VRegister dst, VRegister src) { vfsngjn_vv(dst, src, src); }
+ void vfabs_vv(VRegister dst, VRegister src) { vfsngjx_vv(dst, src, src); }
// Privileged
void uret();
void sret();
@@ -1166,6 +1215,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
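+    // Set the dynamic rounding mode (frm field of fcsr), caching the last
+    // value so redundant fscsr writes are skipped.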
+ void set(RoundingMode mode) {
+ if (mode_ != mode) {
+ assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift);
+ assm_->fscsr(kScratchReg);
+ mode_ = mode;
+ }
+ }
void set(Register rd, Register rs1, VSew sew, Vlmul lmul) {
if (sew != sew_ || lmul != lmul_) {
sew_ = sew;
@@ -1188,10 +1244,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Vlmul lmul_ = m1;
int32_t vl = 0;
Assembler* assm_;
+ RoundingMode mode_ = RNE;
};
VectorUnit VU;
+ void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ DEBUG_PRINTF("\tpc_offset:%d %d\n", pc_offset(),
+ next_buffer_check_ - extra_instructions * kInstrSize);
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
+
protected:
// Readable constants for base and offset adjustment helper; these indicate if,
// aside from offset, another value like offset + 4 should fit into an int16.
@@ -1270,14 +1335,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
- void CheckTrampolinePoolQuick(int extra_instructions = 0) {
- DEBUG_PRINTF("\tpc_offset:%d %d\n", pc_offset(),
- next_buffer_check_ - extra_instructions * kInstrSize);
- if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
- CheckTrampolinePool();
- }
- }
-
#ifdef DEBUG
bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
return target_address_at(
@@ -1450,14 +1507,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// OPIVV OPFVV OPMVV
void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs1,
VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, int8_t vs1,
+ VRegister vs2, MaskType mask = NoMask);
// OPMVV OPFVV
void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
VRegister vs2, MaskType mask = NoMask);
- // OPIVX OPFVF OPMVX
+ // OPIVX OPMVX
void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1,
VRegister vs2, MaskType mask = NoMask);
-
+ // OPFVF
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, FPURegister fs1,
+ VRegister vs2, MaskType mask = NoMask);
// OPMVX
void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
MaskType mask = NoMask);
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index 934b962955..424e966e15 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -57,7 +57,7 @@ const uint32_t kLessSignificantWordInDoublewordOffset = 4;
namespace v8 {
namespace internal {
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4094;
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -858,6 +858,77 @@ enum Opcode : uint32_t {
RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
VREDMIN_FUNCT6 = 0b000101,
RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFUNARY0_FUNCT6 = 0b010010,
+ RO_V_VFUNARY0 = OP_FVV | (VFUNARY0_FUNCT6 << kRvvFunct6Shift),
+ VFUNARY1_FUNCT6 = 0b010011,
+ RO_V_VFUNARY1 = OP_FVV | (VFUNARY1_FUNCT6 << kRvvFunct6Shift),
+
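+  // vs1 field encodings that select the unary operation under
+  // VFUNARY0/VFUNARY1.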
+ VFCVT_XU_F_V = 0b00000,
+ VFCVT_X_F_V = 0b00001,
+ VFCVT_F_XU_V = 0b00010,
+ VFCVT_F_X_V = 0b00011,
+ VFNCVT_F_F_W = 0b10100,
+
+ VFCLASS_V = 0b10000,
+
+ VFADD_FUNCT6 = 0b000000,
+ RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFADD_VF = OP_FVF | (VFADD_FUNCT6 << kRvvFunct6Shift),
+
+ VFSUB_FUNCT6 = 0b000010,
+ RO_V_VFSUB_VV = OP_FVV | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSUB_VF = OP_FVF | (VFSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VFDIV_FUNCT6 = 0b100000,
+ RO_V_VFDIV_VV = OP_FVV | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFDIV_VF = OP_FVF | (VFDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VFMUL_FUNCT6 = 0b100100,
+ RO_V_VFMUL_VV = OP_FVV | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMUL_VF = OP_FVF | (VFMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VMFEQ_FUNCT6 = 0b011000,
+ RO_V_VMFEQ_VV = OP_FVV | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFEQ_VF = OP_FVF | (VMFEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMFNE_FUNCT6 = 0b011100,
+ RO_V_VMFNE_VV = OP_FVV | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFNE_VF = OP_FVF | (VMFNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLT_FUNCT6 = 0b011011,
+ RO_V_VMFLT_VV = OP_FVV | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLT_VF = OP_FVF | (VMFLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMFLE_FUNCT6 = 0b011001,
+ RO_V_VMFLE_VV = OP_FVV | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMFLE_VF = OP_FVF | (VMFLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGE_FUNCT6 = 0b011111,
+ RO_V_VMFGE_VF = OP_FVF | (VMFGE_FUNCT6 << kRvvFunct6Shift),
+
+ VMFGT_FUNCT6 = 0b011101,
+ RO_V_VMFGT_VF = OP_FVF | (VMFGT_FUNCT6 << kRvvFunct6Shift),
+
+ VFMAX_FUNCT6 = 0b000110,
+ RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VFMIN_FUNCT6 = 0b000100,
+ RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJ_FUNCT6 = 0b001000,
+ RO_V_VFSGNJ_VV = OP_FVV | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJ_VF = OP_FVF | (VFSGNJ_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJN_FUNCT6 = 0b001001,
+ RO_V_VFSGNJN_VV = OP_FVV | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJN_VF = OP_FVF | (VFSGNJN_FUNCT6 << kRvvFunct6Shift),
+
+ VFSGNJX_FUNCT6 = 0b001010,
+ RO_V_VFSGNJX_VV = OP_FVV | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VFSGNJX_VF = OP_FVF | (VFSGNJX_FUNCT6 << kRvvFunct6Shift),
};
// ----- Emulated conditions.
@@ -991,6 +1062,13 @@ enum MemoryOdering {
PSIORW = PSI | PSO | PSR | PSW
};
+const int kFloat32ExponentBias = 127;
+const int kFloat32MantissaBits = 23;
+const int kFloat32ExponentBits = 8;
+const int kFloat64ExponentBias = 1023;
+const int kFloat64MantissaBits = 52;
+const int kFloat64ExponentBits = 11;
+
enum FClassFlag {
kNegativeInfinity = 1,
kNegativeNormalNumber = 1 << 1,
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index f010195890..342660bcc0 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -1600,7 +1600,7 @@ void TurboAssembler::li(Register dst, Handle<HeapObject> value,
} else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
EmbeddedObjectIndex index = AddEmbeddedObject(value);
DCHECK(is_uint32(index));
- li(dst, Operand(static_cast<int>(index), rmode));
+ li(dst, Operand(index, rmode));
} else {
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
li(dst, Operand(value.address(), rmode));
@@ -1633,7 +1633,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) {
}
int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
- if (is_int32(value)) {
+ if (is_int32(value + 0x800)) {
return InstrCountForLiLower32Bit(value);
} else {
return li_estimate(value);
@@ -2045,19 +2045,12 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
// Need at least two FPRs, so check against dst == src == fpu_scratch
DCHECK(!(dst == src && dst == fpu_scratch));
- const int kFloat32ExponentBias = 127;
- const int kFloat32MantissaBits = 23;
- const int kFloat32ExponentBits = 8;
- const int kFloat64ExponentBias = 1023;
- const int kFloat64MantissaBits = 52;
- const int kFloat64ExponentBits = 11;
const int kFloatMantissaBits =
sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
const int kFloatExponentBits =
sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
const int kFloatExponentBias =
sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
-
Label done;
{
@@ -2156,6 +2149,72 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
bind(&done);
}
+// According to the JS ECMA specification, for floating-point round operations,
+// if the input is NaN, +/-Infinity, or +/-0, the same input is returned as the
+// rounded result; this differs from the behavior of RISC-V fcvt instructions
+// (which round out-of-range values to the nearest max or min value), so
+// special handling is needed for NaN, +/-Infinity, and +/-0.
+template <typename F>
+void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, RoundingMode frm) {
+ VU.set(scratch, std::is_same<F, float>::value ? E32 : E64, m1);
+  // If src is NaN/+-Infinity/+-Zero or if the exponent is larger than the
+  // number of bits in the mantissa, the result is the same as src, so move src
+  // to dst (to avoid generating another branch).
+
+  // If the real exponent (i.e., the extracted exponent minus the exponent
+  // bias) is not smaller than the number of mantissa bits, the floating-point
+  // value has no fractional part, so the input is already rounded and is left
+  // out of the mask computed below. Note that NaN and Infinity set the maximal
+  // exponent in floating-point representation, so they also satisfy this
+  // condition, and JS round semantics specify that rounding NaN (Infinity)
+  // returns NaN (Infinity), so they are treated as already-rounded values too.
+ li(scratch, 64 - kFloat32MantissaBits - kFloat32ExponentBits);
+ vsll_vx(v_scratch, src, scratch);
+ li(scratch, 64 - kFloat32ExponentBits);
+ vsrl_vx(v_scratch, v_scratch, scratch);
+ li(scratch, kFloat32ExponentBias + kFloat32MantissaBits);
+ vmslt_vx(v0, v_scratch, scratch);
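+  // v0 now holds the mask of elements that actually need rounding; the masked
+  // conversions below leave NaN, Infinity and already-integral values
+  // untouched.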
+
+ VU.set(frm);
+ vmv_vv(dst, src);
+ if (dst == src) {
+ vmv_vv(v_scratch, src);
+ }
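+  // Round the masked elements by converting to integer with the requested
+  // rounding mode (frm) and back to floating point.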
+ vfcvt_x_f_v(dst, src, MaskType::Mask);
+ vfcvt_f_x_v(dst, dst, MaskType::Mask);
+
+  // Special handling is needed if the input is a very small positive/negative
+  // number that rounds to zero. JS semantics require that the rounded result
+  // retain the sign of the input, so a very small positive (negative)
+  // floating-point number should be rounded to positive (negative) 0.
+ if (dst == src) {
+ vfsngj_vv(dst, dst, v_scratch);
+ } else {
+ vfsngj_vv(dst, dst, src);
+ }
+}
+
+void TurboAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RUP);
+}
+
+void TurboAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RUP);
+}
+
+void TurboAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RDN);
+}
+
+void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN);
+}
+
void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RDN);
@@ -3491,6 +3550,7 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
RelocInfo::Mode rmode) {
int32_t offset;
if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
+ CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
auipc(dst, Hi20);
@@ -3668,9 +3728,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- Branch(&regular_invoke, eq, expected_parameter_count,
- Operand(kDontAdaptArgumentsSentinel));
-
+ if (kDontAdaptArgumentsSentinel != 0) {
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
Sub64(expected_parameter_count, expected_parameter_count,
@@ -3717,8 +3778,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
break_(0xCC);
}
@@ -3743,8 +3804,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
Register receiver = temps.Acquire();
LoadReceiver(receiver, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -4152,9 +4213,9 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
PrepareCallCFunction(0, a0);
- li(a0, Operand(static_cast<int>(reason)));
+ li(a0, Operand(static_cast<int64_t>(reason)));
CallCFunction(ExternalReference::abort_with_reason(), 1);
return;
}
@@ -4165,7 +4226,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -4417,6 +4478,14 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
+void TurboAssembler::SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+}
+
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
@@ -4433,7 +4502,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4442,7 +4511,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 53e8543429..1dc4d2075c 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -211,7 +211,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRelative(Register destination, int32_t offset) final;
inline void GenPCRelativeJump(Register rd, int64_t imm32) {
- DCHECK(is_int32(imm32));
+ DCHECK(is_int32(imm32 + 0x800));
int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
auipc(rd, Hi20); // Read PC + Hi20 into scratch.
@@ -219,7 +219,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
- DCHECK(is_int32(imm32));
+ DCHECK(is_int32(imm32 + 0x800));
int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
auipc(rd, Hi20); // Read PC + Hi20 into scratch.
@@ -492,6 +492,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+ void SmiToInt32(Register smi);
+
+ // Enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments);
@@ -837,6 +842,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
+ void Ceil_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+
+ void Ceil_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+
+ void Floor_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Floor_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@@ -978,6 +993,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
RoundingMode mode);
+ template <typename F>
+ void RoundHelper(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch, RoundingMode frm);
+
template <typename TruncFunc>
void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
TruncFunc trunc);
@@ -1236,9 +1255,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label);
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 2d2fccdf3a..14c993512f 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -23,14 +23,14 @@ namespace internal {
// s3: scratch register s4: scratch register 2 used in code-generator-riscv64
// s6: roots in Javascript code s7: context register
// s11: PtrComprCageBaseRegister
-// t3 t5 s10 : scratch register used in scratch_register_list
-
+// t3 t5 : scratch registers used in scratch_register_list
+// t6 : call reg.
// t0 t1 t2 t4: caller-saved scratch registers that can be used in
// macroassembler and builtin-riscv64
#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
V(a0) V(a1) V(a2) V(a3) \
V(a4) V(a5) V(a6) V(a7) V(t0) \
- V(t1) V(t2) V(t4) V(s7) V(s8) V(s9)
+ V(t1) V(t2) V(t4) V(s7) V(s8) V(s9) V(s10)
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index dc04acec61..6c4923194a 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -153,10 +153,10 @@ HeapObject RelocInfo::target_object() {
}
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
- isolate,
+ cage_base,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index b7b582d5a5..9b888e50da 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -1656,8 +1656,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments.
- CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
- beq(&regular_invoke);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ CmpS64(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
+ beq(&regular_invoke);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -1706,8 +1708,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
bkpt(0);
}
@@ -1729,8 +1731,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
{
// Load receiver to pass it later to DebugOnFunctionCall hook.
LoadReceiver(r6, actual_parameter_count);
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -1889,16 +1891,27 @@ void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
CmpS64(type_reg, Operand(type));
}
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
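+  // value is in [lower_limit, higher_limit] iff the unsigned difference
+  // (value - lower_limit) is <= (higher_limit - lower_limit).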
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, value);
+ slgfi(scratch, Operand(lower_limit));
+ CmpU64(scratch, Operand(higher_limit - lower_limit));
+ } else {
+ CmpU64(value, Operand(higher_limit));
+ }
+}
+
void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
LoadU16(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- mov(scratch, type_reg);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
+ CompareRange(type_reg, lower_limit, higher_limit);
}
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
@@ -1912,14 +1925,7 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit,
Label* on_in_range) {
- if (lower_limit != 0) {
- Register scratch = r0;
- mov(scratch, value);
- slgfi(scratch, Operand(lower_limit));
- CmpU64(scratch, Operand(higher_limit - lower_limit));
- } else {
- CmpU64(value, Operand(higher_limit));
- }
+ CompareRange(value, lower_limit, higher_limit);
ble(on_in_range);
}
@@ -2079,7 +2085,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
lgfi(r2, Operand(static_cast<int>(reason)));
PrepareCallCFunction(1, 0, r3);
Move(r3, ExternalReference::abort_with_reason());
@@ -2095,7 +2101,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -2116,7 +2122,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
-void MacroAssembler::AssertNotSmi(Register object) {
+void TurboAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -2124,7 +2130,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-void MacroAssembler::AssertSmi(Register object) {
+void TurboAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
@@ -3917,125 +3923,6 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
}
}
-// Vector LE Load and Transform instructions.
-void TurboAssembler::LoadAndSplat8x16LE(Simd128Register dst,
- const MemOperand& mem) {
- vlrep(dst, mem, Condition(0));
-}
-#define LOAD_SPLAT_LIST(V) \
- V(64x2, LoadU64LE, 3) \
- V(32x4, LoadU32LE, 2) \
- V(16x8, LoadU16LE, 1)
-
-#define LOAD_SPLAT(name, scalar_instr, condition) \
- void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
- const MemOperand& mem) { \
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
- is_uint12(mem.offset())) { \
- vlbrrep(dst, mem, Condition(condition)); \
- return; \
- } \
- scalar_instr(r1, mem); \
- vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
- vrep(dst, dst, Operand(0), Condition(condition)); \
- }
-LOAD_SPLAT_LIST(LOAD_SPLAT)
-#undef LOAD_SPLAT
-#undef LOAD_SPLAT_LIST
-
-#define LOAD_EXTEND_LIST(V) \
- V(32x2U, vuplh, 2) \
- V(32x2S, vuph, 2) \
- V(16x4U, vuplh, 1) \
- V(16x4S, vuph, 1) \
- V(8x8U, vuplh, 0) \
- V(8x8S, vuph, 0)
-
-#define LOAD_EXTEND(name, unpack_instr, condition) \
- void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
- const MemOperand& mem) { \
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
- is_uint12(mem.offset())) { \
- vlebrg(kScratchDoubleReg, mem, Condition(0)); \
- } else { \
- LoadU64LE(r1, mem); \
- vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
- } \
- unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
- Condition(condition)); \
- }
-LOAD_EXTEND_LIST(LOAD_EXTEND)
-#undef LOAD_EXTEND
-#undef LOAD_EXTEND
-
-void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
- vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
- vlebrf(dst, mem, Condition(3));
- return;
- }
- LoadU32LE(r1, mem);
- vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
-}
-
-void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
- vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
- vlebrg(dst, mem, Condition(1));
- return;
- }
- LoadU64LE(r1, mem);
- vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
-}
-
-void TurboAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem,
- int index) {
- vleb(dst, mem, Condition(index));
-}
-#define LOAD_LANE_LIST(V) \
- V(64, vlebrg, LoadU64LE, 3) \
- V(32, vlebrf, LoadU32LE, 2) \
- V(16, vlebrh, LoadU16LE, 1)
-
-#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
- const MemOperand& mem, int lane) { \
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
- is_uint12(mem.offset())) { \
- vector_instr(dst, mem, Condition(lane)); \
- return; \
- } \
- scalar_instr(r1, mem); \
- vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
- }
-LOAD_LANE_LIST(LOAD_LANE)
-#undef LOAD_LANE
-#undef LOAD_LANE_LIST
-
-void TurboAssembler::StoreLane8LE(Simd128Register src, const MemOperand& mem,
- int index) {
- vsteb(src, mem, Condition(index));
-}
-#define STORE_LANE_LIST(V) \
- V(64, vstebrg, StoreU64LE, 3) \
- V(32, vstebrf, StoreU32LE, 2) \
- V(16, vstebrh, StoreU16LE, 1)
-
-#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
- void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
- const MemOperand& mem, int lane) { \
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
- is_uint12(mem.offset())) { \
- vector_instr(src, mem, Condition(lane)); \
- return; \
- } \
- vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
- scalar_instr(r1, mem); \
- }
-STORE_LANE_LIST(STORE_LANE)
-#undef STORE_LANE
-#undef STORE_LANE_LIST
-
#else
void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
Register scratch) {
@@ -4108,83 +3995,6 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
StoreV128(src, mem, scratch1);
}
-// Vector LE Load and Transform instructions.
-#define LOAD_SPLAT_LIST(V) \
- V(64x2, 3) \
- V(32x4, 2) \
- V(16x8, 1) \
- V(8x16, 0)
-
-#define LOAD_SPLAT(name, condition) \
- void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
- const MemOperand& mem) { \
- vlrep(dst, mem, Condition(condition)); \
- }
-LOAD_SPLAT_LIST(LOAD_SPLAT)
-#undef LOAD_SPLAT
-#undef LOAD_SPLAT_LIST
-
-#define LOAD_EXTEND_LIST(V) \
- V(32x2U, vuplh, 2) \
- V(32x2S, vuph, 2) \
- V(16x4U, vuplh, 1) \
- V(16x4S, vuph, 1) \
- V(8x8U, vuplh, 0) \
- V(8x8S, vuph, 0)
-
-#define LOAD_EXTEND(name, unpack_instr, condition) \
- void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
- const MemOperand& mem) { \
- vleg(kScratchDoubleReg, mem, Condition(0)); \
- unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
- Condition(condition)); \
- }
-LOAD_EXTEND_LIST(LOAD_EXTEND)
-#undef LOAD_EXTEND
-#undef LOAD_EXTEND
-
-void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
- vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
- vlef(dst, mem, Condition(3));
-}
-
-void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
- vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
- vleg(dst, mem, Condition(1));
-}
-
-#define LOAD_LANE_LIST(V) \
- V(64, vleg) \
- V(32, vlef) \
- V(16, vleh) \
- V(8, vleb)
-
-#define LOAD_LANE(name, vector_instr) \
- void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
- const MemOperand& mem, int lane) { \
- DCHECK(is_uint12(mem.offset())); \
- vector_instr(dst, mem, Condition(lane)); \
- }
-LOAD_LANE_LIST(LOAD_LANE)
-#undef LOAD_LANE
-#undef LOAD_LANE_LIST
-
-#define STORE_LANE_LIST(V) \
- V(64, vsteg) \
- V(32, vstef) \
- V(16, vsteh) \
- V(8, vsteb)
-
-#define STORE_LANE(name, vector_instr) \
- void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
- const MemOperand& mem, int lane) { \
- DCHECK(is_uint12(mem.offset())); \
- vector_instr(src, mem, Condition(lane)); \
- }
-STORE_LANE_LIST(STORE_LANE)
-#undef STORE_LANE
-#undef STORE_LANE_LIST
-
#endif
// Load And Test (Reg <- Reg)
@@ -5612,6 +5422,123 @@ void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
}
+// Vector LE Load and Transform instructions.
+#ifdef V8_TARGET_BIG_ENDIAN
+#define IS_BIG_ENDIAN true
+#else
+#define IS_BIG_ENDIAN false
+#endif
+
+#define CAN_LOAD_STORE_REVERSE \
+ IS_BIG_ENDIAN&& CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)
+
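+// When the byte-reversing vector loads/stores are usable, emit them directly;
+// otherwise go through a GPR with the LE scalar helpers and insert the value
+// into the vector register.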
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, vlbrrep, LoadU64LE, 3) \
+ V(32x4, vlbrrep, LoadU32LE, 2) \
+ V(16x8, vlbrrep, LoadU16LE, 1) \
+ V(8x16, vlrep, LoadU8, 0)
+
+#define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(condition)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
+ vrep(dst, dst, Operand(0), Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
+
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vlebrg(kScratchDoubleReg, mem, Condition(0)); \
+ } else { \
+ LoadU64LE(r1, mem); \
+ vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
+ } \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrf(dst, mem, Condition(3));
+ return;
+ }
+ LoadU32LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) {
+ vlebrg(dst, mem, Condition(1));
+ return;
+ }
+ LoadU64LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
+}
+
+#define LOAD_LANE_LIST(V) \
+ V(64, vlebrg, LoadU64LE, 3) \
+ V(32, vlebrf, LoadU32LE, 2) \
+ V(16, vlebrh, LoadU16LE, 1) \
+ V(8, vleb, LoadU8, 0)
+
+#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(lane)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
+
+#define STORE_LANE_LIST(V) \
+ V(64, vstebrg, StoreU64LE, 3) \
+ V(32, vstebrf, StoreU32LE, 2) \
+ V(16, vstebrh, StoreU16LE, 1) \
+ V(8, vsteb, StoreU8, 0)
+
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \
+ vector_instr(src, mem, Condition(lane)); \
+ return; \
+ } \
+ vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
+ scalar_instr(r1, mem); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+#undef CAN_LOAD_STORE_REVERSE
+#undef IS_BIG_ENDIAN
+
#undef kScratchDoubleReg
} // namespace internal
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index ab105d5eb4..e7c4e8994c 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -1021,6 +1021,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
lgfr(dst, dst);
}
+ void SmiToInt32(Register smi) {
+ if (FLAG_enable_slow_asserts) {
+ AssertSmi(smi);
+ }
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ SmiUntag(smi);
+ }
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
// Activation support.
void EnterFrame(StackFrame::Type type,
@@ -1354,6 +1365,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Checks if value is in range [lower_limit, higher_limit] using a single
// comparison.
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
@@ -1461,10 +1474,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bne(not_smi_label /*, cr0*/);
}
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
#if !defined(V8_COMPRESS_POINTERS) && !defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index dc39be5b84..b8210303f4 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -73,6 +73,32 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#endif
}
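+// Non-destructive three-operand Movhps/Movlps: use the VEX encoding when AVX
+// is available, otherwise copy src1 into dst first and use the SSE form.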
+void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovhps(dst, src1, src2);
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ movhps(dst, src2);
+ }
+}
+
+void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
+ Operand src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovlps(dst, src1, src2);
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ movlps(dst, src2);
+ }
+}
+
void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -88,6 +114,7 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
uint8_t lane) {
+ ASM_CODE_COMMENT(this);
if (lane == 0) {
if (dst != src) {
Movaps(dst, src);
@@ -106,6 +133,7 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
DoubleRegister rep, uint8_t lane) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
if (lane == 0) {
@@ -127,8 +155,70 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
}
}
+void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ // The minps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minps in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vminps(scratch, lhs, rhs);
+ vminps(dst, rhs, lhs);
+ } else if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ minps(scratch, dst);
+ minps(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ minps(scratch, rhs);
+ movaps(dst, rhs);
+ minps(dst, lhs);
+ }
+ // Propagate -0's and NaNs, which may be non-canonical.
+ Orps(scratch, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ Cmpunordps(dst, dst, scratch);
+ Orps(scratch, dst);
+ Psrld(dst, dst, byte{10});
+ Andnps(dst, dst, scratch);
+}
+
+void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ // The maxps instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxps in both orders, merge the results, and adjust.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(scratch, lhs, rhs);
+ vmaxps(dst, rhs, lhs);
+ } else if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ maxps(scratch, dst);
+ maxps(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ maxps(scratch, rhs);
+ movaps(dst, rhs);
+ maxps(dst, lhs);
+ }
+ // Find discrepancies.
+ Xorps(dst, scratch);
+ // Propagate NaNs, which may be non-canonical.
+ Orps(scratch, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ Subps(scratch, scratch, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ Cmpunordps(dst, dst, scratch);
+ Psrld(dst, dst, byte{10});
+ Andnps(dst, dst, scratch);
+}
+
void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
// The minpd instruction doesn't propagate NaNs and +0's in its first
@@ -166,6 +256,7 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
XMMRegister rhs, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
// The maxpd instruction doesn't propagate NaNs and +0's in its first
@@ -204,6 +295,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
}
void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
vbroadcastss(dst, src);
@@ -222,6 +314,7 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
uint8_t lane) {
+ ASM_CODE_COMMENT(this);
DCHECK_LT(lane, 4);
// These instructions are shorter than insertps, but will leave junk in
// the top lanes of dst.
@@ -243,6 +336,7 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
+ ASM_CODE_COMMENT(this);
if (laneidx == 0) {
Movss(dst, src);
} else {
@@ -254,6 +348,7 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
template <typename Op>
void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
DCHECK(!CpuFeatures::IsSupported(AVX2));
CpuFeatureScope ssse3_scope(this, SSSE3);
Movd(dst, src);
@@ -263,6 +358,7 @@ void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
Movd(scratch, src);
@@ -274,6 +370,7 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@@ -286,6 +383,7 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1,
XMMRegister tmp2) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2);
// Perform 16-bit shift, then mask away low bits.
if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
@@ -307,6 +405,7 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK(!AreAliased(src1, tmp2, tmp3));
@@ -332,6 +431,7 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
uint8_t src2, XMMRegister tmp) {
+ ASM_CODE_COMMENT(this);
// Unpack bytes into words, do word (16-bit) shifts, and repack.
DCHECK_NE(dst, tmp);
uint8_t shift = truncate_to_int3(src2) + 8;
@@ -346,6 +446,7 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2);
@@ -366,6 +467,7 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
uint8_t src2, Register tmp1,
XMMRegister tmp2) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(dst, tmp2);
if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
movaps(dst, src1);
@@ -387,6 +489,7 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
Register src2, Register tmp1,
XMMRegister tmp2, XMMRegister tmp3) {
+ ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(dst, tmp2, tmp3));
DCHECK_NE(src1, tmp2);
@@ -413,6 +516,7 @@ void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
}
void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
Movd(dst, src);
@@ -423,6 +527,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
}
void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
+ ASM_CODE_COMMENT(this);
DCHECK_OPERAND_IS_NOT_REG(src);
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope avx2_scope(this, AVX2);
@@ -435,6 +540,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool is_signed) {
+ ASM_CODE_COMMENT(this);
is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
Pmullw(dst, scratch);
@@ -443,6 +549,7 @@ void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpunpckhbw(scratch, src1, src1);
@@ -466,6 +573,7 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
XMMRegister src2,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
// The logic here is slightly complicated to handle all the cases of register
// aliasing. This allows flexibility for callers in TurboFan and Liftoff.
if (CpuFeatures::IsSupported(AVX)) {
@@ -514,6 +622,7 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
XMMRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// src = |a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p| (high)
@@ -537,6 +646,7 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// tmp = |0|0|0|0|0|0|0|0 | 0|0|0|0|0|0|0|0|
@@ -624,6 +734,7 @@ void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool low, bool is_signed) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpmullw(scratch, src1, src2);
@@ -640,6 +751,7 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
XMMRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// src = |a|b|c|d|e|f|g|h| (high)
@@ -663,6 +775,7 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
// scratch = |0|0|0|0|0|0|0|0|
@@ -687,6 +800,7 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vpxor(scratch, scratch, scratch);
@@ -703,6 +817,7 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
XMMRegister tmp = dst == src ? scratch : dst;
@@ -723,13 +838,22 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpcmpgtq(dst, src0, src1);
} else if (CpuFeatures::IsSupported(SSE4_2)) {
CpuFeatureScope sse_scope(this, SSE4_2);
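+    // pcmpgtq is a destructive two-operand instruction; handle aliasing of
+    // dst with either source so neither input is clobbered prematurely.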
- DCHECK_EQ(dst, src0);
- pcmpgtq(dst, src1);
+ if (dst == src0) {
+ pcmpgtq(dst, src1);
+ } else if (dst == src1) {
+ movaps(scratch, src0);
+ pcmpgtq(scratch, src1);
+ movaps(dst, scratch);
+ } else {
+ movaps(dst, src0);
+ pcmpgtq(dst, src1);
+ }
} else {
CpuFeatureScope sse_scope(this, SSE3);
DCHECK_NE(dst, src0);
@@ -748,6 +872,7 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
XMMRegister src1, XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpcmpgtq(dst, src1, src0);
@@ -782,6 +907,7 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
uint8_t shift, XMMRegister xmm_tmp) {
+ ASM_CODE_COMMENT(this);
DCHECK_GT(64, shift);
DCHECK_NE(xmm_tmp, dst);
DCHECK_NE(xmm_tmp, src);
@@ -816,6 +942,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Register shift, XMMRegister xmm_tmp,
XMMRegister xmm_shift,
Register tmp_shift) {
+ ASM_CODE_COMMENT(this);
DCHECK_NE(xmm_tmp, dst);
DCHECK_NE(xmm_tmp, src);
DCHECK_NE(xmm_shift, dst);
@@ -841,6 +968,52 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psubq(dst, xmm_tmp);
}
+void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister tmp1,
+ XMMRegister tmp2) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(dst, tmp1, tmp2));
+ DCHECK(!AreAliased(lhs, tmp1, tmp2));
+ DCHECK(!AreAliased(rhs, tmp1, tmp2));
+
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // 1. Multiply high dword of each qword of left with right.
+ vpsrlq(tmp1, lhs, byte{32});
+ vpmuludq(tmp1, tmp1, rhs);
+ // 2. Multiply high dword of each qword of right with left.
+ vpsrlq(tmp2, rhs, byte{32});
+ vpmuludq(tmp2, tmp2, lhs);
+ // 3. Add 1 and 2, then shift left by 32 (this is the high dword of result).
+ vpaddq(tmp2, tmp2, tmp1);
+ vpsllq(tmp2, tmp2, byte{32});
+ // 4. Multiply low dwords (this is the low dword of result).
+ vpmuludq(dst, lhs, rhs);
+ // 5. Add 3 and 4.
+ vpaddq(dst, dst, tmp2);
+ } else {
+ // Same algorithm as AVX version, but with moves to not overwrite inputs.
+ movaps(tmp1, lhs);
+ movaps(tmp2, rhs);
+ psrlq(tmp1, byte{32});
+ pmuludq(tmp1, rhs);
+ psrlq(tmp2, byte{32});
+ pmuludq(tmp2, lhs);
+ paddq(tmp2, tmp1);
+ psllq(tmp2, byte{32});
+ if (dst == rhs) {
+ // pmuludq is commutative
+ pmuludq(dst, lhs);
+ } else {
+ if (dst != lhs) {
+ movaps(dst, lhs);
+ }
+ pmuludq(dst, rhs);
+ }
+ paddq(dst, tmp2);
+ }
+}
+
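An illustrative scalar rendering of the per-lane decomposition that I64x2Mul performs above; the helper name is made up for the sketch and the code is not part of the patch.

#include <cstdint>

// 64x64->64 multiply built from 32-bit pieces, mirroring steps 1-5 above.
// The a_hi * b_hi term would land entirely above bit 63, so it is dropped.
static inline uint64_t I64MulViaDwords(uint64_t a, uint64_t b) {
  uint64_t a_hi = a >> 32, a_lo = a & 0xFFFFFFFFu;
  uint64_t b_hi = b >> 32, b_lo = b & 0xFFFFFFFFu;
  uint64_t cross = (a_hi * b_lo + a_lo * b_hi) << 32;  // steps 1-3
  return a_lo * b_lo + cross;                          // steps 4-5
}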
// 1. Unpack src0, src1 into even-number elements of scratch.
// 2. Unpack src1, src0 into even-number elements of dst.
// 3. Multiply 1. with 2.
@@ -848,6 +1021,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool low, bool is_signed) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
if (low) {
@@ -877,6 +1051,7 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
XMMRegister src) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpunpckhqdq(dst, src, src);
@@ -895,6 +1070,7 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vpxor(scratch, scratch, scratch);
@@ -915,6 +1091,7 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
if (dst == src) {
Pcmpeqd(scratch, scratch);
Pxor(dst, scratch);
@@ -927,6 +1104,7 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
XMMRegister src1, XMMRegister src2,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
// v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
// pandn(x, y) = !x & y, so we have to flip the mask and input.
if (CpuFeatures::IsSupported(AVX)) {
@@ -946,6 +1124,7 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the
// first instruction in each case below is the one that loads.
@@ -969,6 +1148,7 @@ void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the
// first instruction in each case below is the one that loads.
@@ -989,6 +1169,7 @@ void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
}
void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
+ ASM_CODE_COMMENT(this);
// The trap handler uses the current pc to create a landing, so that it can
// determine if a trap occurred in Wasm code due to an OOB load. Make sure the
// first instruction in each case below is the one that loads.
@@ -1003,6 +1184,7 @@ void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
+ ASM_CODE_COMMENT(this);
if (laneidx == 0) {
Movlps(dst, src);
} else {
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index c2d07392ac..82c01e8292 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/external-reference.h"
#include "src/codegen/turbo-assembler.h"
#if V8_TARGET_ARCH_IA32
@@ -45,6 +46,24 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void Add(Register dst, Immediate src);
void And(Register dst, Immediate src);
+ // Will move src1 to dst if AVX is not supported.
+ void Movhps(XMMRegister dst, XMMRegister src1, Operand src2);
+ void Movlps(XMMRegister dst, XMMRegister src1, Operand src2);
+
+ template <typename Op>
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
+ imm8, load_pc_offset, {SSE4_1});
+ }
+
+ template <typename Op>
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
+ imm8, load_pc_offset);
+ }
+
// Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
template <typename Op>
void Pshufb(XMMRegister dst, XMMRegister src, Op mask) {
@@ -218,7 +237,11 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Cvtdq2ps, cvtdq2ps)
AVX_OP(Cvtpd2ps, cvtpd2ps)
AVX_OP(Cvtps2pd, cvtps2pd)
+ AVX_OP(Cvtsd2ss, cvtsd2ss)
+ AVX_OP(Cvtss2sd, cvtss2sd)
AVX_OP(Cvttps2dq, cvttps2dq)
+ AVX_OP(Cvttsd2si, cvttsd2si)
+ AVX_OP(Cvttss2si, cvttss2si)
AVX_OP(Divpd, divpd)
AVX_OP(Divps, divps)
AVX_OP(Divsd, divsd)
@@ -260,9 +283,9 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Pcmpgtb, pcmpgtb)
AVX_OP(Pcmpgtd, pcmpgtd)
AVX_OP(Pcmpgtw, pcmpgtw)
+ AVX_OP(Pcmpeqb, pcmpeqb)
AVX_OP(Pcmpeqd, pcmpeqd)
AVX_OP(Pcmpeqw, pcmpeqw)
- AVX_OP(Pinsrw, pinsrw)
AVX_OP(Pmaddwd, pmaddwd)
AVX_OP(Pmaxsw, pmaxsw)
AVX_OP(Pmaxub, pmaxub)
@@ -308,11 +331,18 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Subps, subps)
AVX_OP(Subsd, subsd)
AVX_OP(Subss, subss)
+ AVX_OP(Ucomisd, ucomisd)
+ AVX_OP(Ucomiss, ucomiss)
AVX_OP(Unpcklps, unpcklps)
AVX_OP(Xorpd, xorpd)
AVX_OP(Xorps, xorps)
+ // Many AVX processors have separate integer/floating-point domains, so use
+ // vmovaps if AVX is supported. On SSE, movaps is 1 byte shorter than movdqa,
+ // and has the same behavior. Most SSE processors also don't have the same
+ // delay moving between integer and floating-point domains.
AVX_OP_WITH_DIFF_SSE_INSTR(Movapd, movapd, movaps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movdqa, movdqa, movaps)
AVX_OP_WITH_DIFF_SSE_INSTR(Movdqu, movdqu, movups)
AVX_OP_WITH_DIFF_SSE_INSTR(Pand, pand, andps)
AVX_OP_WITH_DIFF_SSE_INSTR(Por, por, orps)
@@ -332,11 +362,12 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP_SSSE3(Psignw, psignw)
AVX_OP_SSE4_1(Extractps, extractps)
+ AVX_OP_SSE4_1(Insertps, insertps)
AVX_OP_SSE4_1(Packusdw, packusdw)
AVX_OP_SSE4_1(Pblendw, pblendw)
+ AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
AVX_OP_SSE4_1(Pextrb, pextrb)
AVX_OP_SSE4_1(Pextrw, pextrw)
- AVX_OP_SSE4_1(Pinsrb, pinsrb)
AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
AVX_OP_SSE4_1(Pmaxud, pmaxud)
@@ -355,6 +386,14 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Ptest, ptest)
AVX_OP_SSE4_1(Roundpd, roundpd)
AVX_OP_SSE4_1(Roundps, roundps)
+ AVX_OP_SSE4_1(Roundsd, roundsd)
+ AVX_OP_SSE4_1(Roundss, roundss)
+
+#undef AVX_OP
+#undef AVX_OP_SSE3
+#undef AVX_OP_SSSE3
+#undef AVX_OP_SSE4_1
+#undef AVX_OP_SSE4_2
void F64x2ExtractLane(DoubleRegister dst, XMMRegister src, uint8_t lane);
void F64x2ReplaceLane(XMMRegister dst, XMMRegister src, DoubleRegister rep,
@@ -365,6 +404,10 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
XMMRegister scratch);
void F32x4Splat(XMMRegister dst, DoubleRegister src);
void F32x4ExtractLane(FloatRegister dst, XMMRegister src, uint8_t lane);
+ void F32x4Min(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
+ void F32x4Max(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
void I8x16Splat(XMMRegister dst, Register src, XMMRegister scratch);
void I8x16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
@@ -413,6 +456,8 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I64x2ShrS(XMMRegister dst, XMMRegister src, Register shift,
XMMRegister xmm_tmp, XMMRegister xmm_shift,
Register tmp_shift);
+ void I64x2Mul(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister tmp1, XMMRegister tmp2);
void I64x2ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch, bool low, bool is_signed);
void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
@@ -427,6 +472,35 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void S128Load32Splat(XMMRegister dst, Operand src);
void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ protected:
+ template <typename Op>
+ using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Op, uint8_t);
+ template <typename Op>
+ using NoAvxFn = void (Assembler::*)(XMMRegister, Op, uint8_t);
+
+ template <typename Op>
+ void PinsrHelper(Assembler* assm, AvxFn<Op> avx, NoAvxFn<Op> noavx,
+ XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr,
+ base::Optional<CpuFeature> feature = base::nullopt) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
+ (assm->*avx)(dst, src1, src2, imm8);
+ return;
+ }
+
+ if (dst != src1) assm->movaps(dst, src1);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
+ if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*noavx)(dst, src2, imm8);
+ } else {
+ (assm->*noavx)(dst, src2, imm8);
+ }
+ }
+
private:
template <typename Op>
void I8x16SplatPreAvx2(XMMRegister dst, Op src, XMMRegister scratch);
@@ -452,6 +526,66 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
using SharedTurboAssembler::SharedTurboAssembler;
public:
+ void Abspd(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+ ExternalReference::address_of_double_abs_constant());
+ }
+
+ void Absps(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+ ExternalReference::address_of_float_abs_constant());
+ }
+
+ void Negpd(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+ ExternalReference::address_of_double_neg_constant());
+ }
+
+ void Negps(XMMRegister dst, XMMRegister src, Register tmp) {
+ FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+ ExternalReference::address_of_float_neg_constant());
+ }
+#undef FLOAT_UNOP
+
+ void Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
+ if (imm8 == 0) {
+ Movd(dst, src);
+ return;
+ }
+
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpextrd(dst, src, imm8);
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrd(dst, src, imm8);
+ } else {
+ DCHECK_LT(imm8, 2);
+ impl()->PextrdPreSse41(dst, src, imm8);
+ }
+ }
+
+ template <typename Op>
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Op src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1,
+ src2, imm8, load_pc_offset,
+ base::Optional<CpuFeature>(SSE4_1));
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ impl()->PinsrdPreSse41(dst, src2, imm8, load_pc_offset);
+ }
+ }
+
+ template <typename Op>
+ void Pinsrd(XMMRegister dst, Op src, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr) {
+ Pinsrd(dst, dst, src, imm8, load_pc_offset);
+ }
+
void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
Register scratch) {
ASM_CODE_COMMENT(this);
@@ -474,6 +608,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
void I32x4SConvertF32x4(XMMRegister dst, XMMRegister src, XMMRegister tmp,
Register scratch) {
+ ASM_CODE_COMMENT(this);
Operand op = ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_int32_overflow_as_float(), scratch);
@@ -515,6 +650,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
XMMRegister scratch, Register tmp) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
XMMRegister original_dst = dst;
@@ -551,6 +687,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
XMMRegister scratch, Register tmp) {
+ ASM_CODE_COMMENT(this);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
vxorpd(scratch, scratch, scratch);
@@ -590,6 +727,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
Register scratch) {
+ ASM_CODE_COMMENT(this);
Operand op = ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
// pmaddwd multiplies signed words in src and op, producing
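The comment above is cut off by the hunk boundary; the trick it describes can be sketched in scalar form. Illustrative only, assuming the referenced constant holds 0x0001 in every 16-bit lane.

#include <cstdint>

// pmaddwd multiplies each signed 16-bit lane by the matching lane of the
// splat-0x0001 constant and adds adjacent products, which is exactly a
// signed pairwise add into 32-bit lanes.
static inline int32_t PairwiseAddS16(int16_t lo, int16_t hi) {
  return static_cast<int32_t>(lo) * 1 + static_cast<int32_t>(hi) * 1;
}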
@@ -752,6 +890,18 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
Register scratch) {
return impl()->ExternalReferenceAsOperand(reference, scratch);
}
+
+ using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister,
+ XMMRegister, Operand);
+ void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp,
+ FloatInstruction op, ExternalReference ext) {
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
+ movaps(dst, src);
+ src = dst;
+ }
+ SharedTurboAssembler* assm = this;
+ (assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp));
+ }
};
} // namespace internal
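A scalar sketch of the bit-level identities behind Absps/Negps (the pd variants are analogous with 64-bit masks). The mask values are assumptions about what the external-reference constants hold; the sketch only makes the andps/xorps choice in FloatUnop concrete.

#include <cstdint>
#include <cstring>

static inline float AbsViaMask(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= 0x7FFFFFFFu;  // andps with the float-abs constant clears the sign bit
  std::memcpy(&x, &bits, sizeof bits);
  return x;
}

static inline float NegViaMask(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= 0x80000000u;  // xorps with the float-neg constant flips the sign bit
  std::memcpy(&x, &bits, sizeof bits);
  return x;
}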
diff --git a/deps/v8/src/codegen/source-position.h b/deps/v8/src/codegen/source-position.h
index 0db12aea22..9ec845f907 100644
--- a/deps/v8/src/codegen/source-position.h
+++ b/deps/v8/src/codegen/source-position.h
@@ -5,7 +5,7 @@
#ifndef V8_CODEGEN_SOURCE_POSITION_H_
#define V8_CODEGEN_SOURCE_POSITION_H_
-#include <ostream>
+#include <iosfwd>
#include "src/base/bit-field.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 628f8b6eda..851e9c2957 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -342,12 +342,12 @@ HeapObject RelocInfo::target_object() {
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
-HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
DCHECK(!HAS_SMI_TAG(compressed));
- Object obj(DecompressTaggedPointer(isolate, compressed));
+ Object obj(DecompressTaggedPointer(cage_base, compressed));
return HeapObject::cast(obj);
}
DCHECK(IsFullEmbeddedObject(rmode_) || IsDataEmbeddedObject(rmode_));
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 108f381ba7..1c5723c5a3 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -3581,10 +3581,18 @@ void Assembler::vmovdqa(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::vmovdqa(YMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, ymm0, src, kL256, k66, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::vmovdqa(YMMRegister dst, YMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kL256, k66, k0F, kWIG);
+ emit_vex_prefix(dst, ymm0, src, kL256, k66, k0F, kWIG);
emit(0x6F);
emit_sse_operand(dst, src);
}
@@ -3613,10 +3621,26 @@ void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovdqu(YMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, ymm0, src, kL256, kF3, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::vmovdqu(Operand dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, ymm0, dst, kL256, kF3, k0F, kWIG);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
void Assembler::vmovdqu(YMMRegister dst, YMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src, xmm0, dst, kL256, kF3, k0F, kWIG);
+ emit_vex_prefix(src, ymm0, dst, kL256, kF3, k0F, kWIG);
emit(0x7F);
emit_sse_operand(src, dst);
}
@@ -3624,7 +3648,7 @@ void Assembler::vmovdqu(YMMRegister dst, YMMRegister src) {
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(0x12);
emit_sse_operand(dst, src2);
}
@@ -3632,7 +3656,7 @@ void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::vmovlps(Operand dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNoPrefix, k0F, kWIG);
emit(0x13);
emit_sse_operand(src, dst);
}
@@ -3640,7 +3664,7 @@ void Assembler::vmovlps(Operand dst, XMMRegister src) {
void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(0x16);
emit_sse_operand(dst, src2);
}
@@ -3648,7 +3672,7 @@ void Assembler::vmovhps(XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::vmovhps(Operand dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src, xmm0, dst, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(src, xmm0, dst, kL128, kNoPrefix, k0F, kWIG);
emit(0x17);
emit_sse_operand(src, dst);
}
@@ -3664,6 +3688,17 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
+void Assembler::vinstr(byte op, YMMRegister dst, YMMRegister src1,
+ YMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
+ DCHECK(feature == AVX || feature == AVX2);
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, pp, m, w);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w,
CpuFeature feature) {
@@ -3675,11 +3710,22 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
emit_sse_operand(dst, src2);
}
+void Assembler::vinstr(byte op, YMMRegister dst, YMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
+ DCHECK(feature == AVX || feature == AVX2);
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, pp, m, w);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3688,7 +3734,7 @@ void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1,
YMMRegister src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL256, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL256, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3696,7 +3742,7 @@ void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1,
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3704,7 +3750,7 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL256, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL256, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3713,7 +3759,7 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, byte imm8) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kL128, kNoPrefix, k0F, kWIG);
emit(op);
emit_sse_operand(dst, src2);
emit(imm8);
@@ -3739,7 +3785,7 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, xmm0, src, kLIG, kNoPrefix, k0F, kWIG);
emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3747,7 +3793,7 @@ void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
void Assembler::vucomiss(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, kNone, k0F, kWIG);
+ emit_vex_prefix(dst, xmm0, src, kLIG, kNoPrefix, k0F, kWIG);
emit(0x2E);
emit_sse_operand(dst, src);
}
@@ -3781,7 +3827,7 @@ void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
void Assembler::bmi1q(byte op, Register reg, Register vreg, Register rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW1);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW1);
emit(op);
emit_modrm(reg, rm);
}
@@ -3789,7 +3835,7 @@ void Assembler::bmi1q(byte op, Register reg, Register vreg, Register rm) {
void Assembler::bmi1q(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW1);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW1);
emit(op);
emit_operand(reg, rm);
}
@@ -3797,7 +3843,7 @@ void Assembler::bmi1q(byte op, Register reg, Register vreg, Operand rm) {
void Assembler::bmi1l(byte op, Register reg, Register vreg, Register rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW0);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW0);
emit(op);
emit_modrm(reg, rm);
}
@@ -3805,7 +3851,7 @@ void Assembler::bmi1l(byte op, Register reg, Register vreg, Register rm) {
void Assembler::bmi1l(byte op, Register reg, Register vreg, Operand rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
- emit_vex_prefix(reg, vreg, rm, kLZ, kNone, k0F38, kW0);
+ emit_vex_prefix(reg, vreg, rm, kLZ, kNoPrefix, k0F38, kW0);
emit(op);
emit_operand(reg, rm);
}
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index cd93c7f856..fa85f2eedc 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -486,7 +486,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
// VEX prefix encodings.
- enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+ enum SIMDPrefix { kNoPrefix = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
@@ -508,45 +508,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
-#define DECLARE_INSTRUCTION(instruction) \
- template <class P1> \
- void instruction##_tagged(P1 p1) { \
- emit_##instruction(p1, kTaggedSize); \
- } \
- \
- template <class P1> \
- void instruction##l(P1 p1) { \
- emit_##instruction(p1, kInt32Size); \
- } \
- \
- template <class P1> \
- void instruction##q(P1 p1) { \
- emit_##instruction(p1, kInt64Size); \
- } \
- \
- template <class P1, class P2> \
- void instruction##_tagged(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kTaggedSize); \
- } \
- \
- template <class P1, class P2> \
- void instruction##l(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kInt32Size); \
- } \
- \
- template <class P1, class P2> \
- void instruction##q(P1 p1, P2 p2) { \
- emit_##instruction(p1, p2, kInt64Size); \
- } \
- \
- template <class P1, class P2, class P3> \
- void instruction##l(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kInt32Size); \
- } \
- \
- template <class P1, class P2, class P3> \
- void instruction##q(P1 p1, P2 p2, P3 p3) { \
- emit_##instruction(p1, p2, p3, kInt64Size); \
+#define DECLARE_INSTRUCTION(instruction) \
+ template <typename... Ps> \
+ void instruction##_tagged(Ps... ps) { \
+ emit_##instruction(ps..., kTaggedSize); \
+ } \
+ \
+ template <typename... Ps> \
+ void instruction##l(Ps... ps) { \
+ emit_##instruction(ps..., kInt32Size); \
+ } \
+ \
+ template <typename... Ps> \
+ void instruction##q(Ps... ps) { \
+ emit_##instruction(ps..., kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
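A hedged illustration of what the variadic rewrite expands to, assuming `add` is one of the entries in ASSEMBLER_INSTRUCTION_LIST:

// For an `add` entry, the macro now generates one forwarding template per size
// suffix instead of fixed one-, two- and three-argument overloads:
//   template <typename... Ps>
//   void addq(Ps... ps) { emit_add(ps..., kInt64Size); }
// so addq(dst, src) still lowers to emit_add(dst, src, kInt64Size), and new
// arities are picked up without touching the macro again.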
@@ -964,8 +939,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature feature = AVX);
+ void vinstr(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature = AVX2);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature feature = AVX);
+ void vinstr(byte op, YMMRegister dst, YMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature = AVX2);
// SSE instructions
void sse_instr(XMMRegister dst, XMMRegister src, byte escape, byte opcode);
@@ -1350,10 +1331,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
void vmovdqa(XMMRegister dst, Operand src);
void vmovdqa(XMMRegister dst, XMMRegister src);
+ void vmovdqa(YMMRegister dst, Operand src);
void vmovdqa(YMMRegister dst, YMMRegister src);
void vmovdqu(XMMRegister dst, Operand src);
void vmovdqu(Operand dst, XMMRegister src);
void vmovdqu(XMMRegister dst, XMMRegister src);
+ void vmovdqu(YMMRegister dst, Operand src);
+ void vmovdqu(Operand dst, YMMRegister src);
void vmovdqu(YMMRegister dst, YMMRegister src);
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
@@ -1394,15 +1378,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE_BINOP_INSTRUCTION_LIST(AVX_SSE_BINOP)
#undef AVX_SSE_BINOP
-#define AVX_3(instr, opcode, impl) \
- void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- impl(opcode, dst, src1, src2); \
- } \
- void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
- impl(opcode, dst, src1, src2); \
+#define AVX_3(instr, opcode, impl, SIMDRegister) \
+ void instr(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
+ impl(opcode, dst, src1, src2); \
+ } \
+ void instr(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
+ impl(opcode, dst, src1, src2); \
}
- AVX_3(vhaddps, 0x7c, vsd)
+ AVX_3(vhaddps, 0x7c, vsd, XMMRegister)
+ AVX_3(vhaddps, 0x7c, vsd, YMMRegister)
#define AVX_SCALAR(instr, prefix, escape, opcode) \
void v##instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
@@ -1427,10 +1412,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef AVX_SSE2_SHIFT_IMM
void vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x16, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
+ vinstr(0x12, dst, src1, src2, kNoPrefix, k0F, kWIG);
}
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG);
@@ -1516,16 +1501,22 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundps(YMMRegister dst, YMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, ymm0, src, k66, k0F3A, kWIG, AVX);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
vinstr(0x09, dst, xmm0, src, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
-
- void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
+ void vroundpd(YMMRegister dst, YMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, ymm0, src, k66, k0F3A, kWIG, AVX);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
- void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
+
+ template <typename Reg, typename Op>
+ void vsd(byte op, Reg dst, Reg src1, Op src2) {
+ vinstr(op, dst, src1, src2, kF2, k0F, kWIG, AVX);
}
void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1659,24 +1650,48 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
emit(imm8);
}
+ void vpshufd(YMMRegister dst, YMMRegister src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
void vpshufd(XMMRegister dst, Operand src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
emit(imm8);
}
+ void vpshufd(YMMRegister dst, Operand src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, k66, k0F, kWIG);
+ emit(imm8);
+ }
void vpshuflw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
emit(imm8);
}
+ void vpshuflw(YMMRegister dst, YMMRegister src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF2, k0F, kWIG);
+ emit(imm8);
+ }
void vpshuflw(XMMRegister dst, Operand src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
emit(imm8);
}
+ void vpshuflw(YMMRegister dst, Operand src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF2, k0F, kWIG);
+ emit(imm8);
+ }
void vpshufhw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
emit(imm8);
}
+ void vpshufhw(YMMRegister dst, YMMRegister src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF3, k0F, kWIG);
+ emit(imm8);
+ }
void vpshufhw(XMMRegister dst, Operand src, uint8_t imm8) {
- vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG);
+ vinstr(0x70, dst, xmm0, src, kF3, k0F, kWIG);
+ emit(imm8);
+ }
+ void vpshufhw(YMMRegister dst, Operand src, uint8_t imm8) {
+ vinstr(0x70, dst, ymm0, src, kF3, k0F, kWIG);
emit(imm8);
}
@@ -1685,20 +1700,38 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
emit(mask);
}
+ void vpblendw(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ uint8_t mask) {
+ vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(mask);
+ }
void vpblendw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask) {
vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
emit(mask);
}
+ void vpblendw(YMMRegister dst, YMMRegister src1, Operand src2, uint8_t mask) {
+ vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(mask);
+ }
void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2,
uint8_t imm8) {
vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
emit(imm8);
}
+ void vpalignr(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ uint8_t imm8) {
+ vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(imm8);
+ }
void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
emit(imm8);
}
+ void vpalignr(YMMRegister dst, YMMRegister src1, Operand src2, uint8_t imm8) {
+ vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(imm8);
+ }
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vps(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2);
@@ -1775,16 +1808,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void popcntl(Register dst, Operand src);
void bzhiq(Register dst, Register src1, Register src2) {
- bmi2q(kNone, 0xf5, dst, src2, src1);
+ bmi2q(kNoPrefix, 0xf5, dst, src2, src1);
}
void bzhiq(Register dst, Operand src1, Register src2) {
- bmi2q(kNone, 0xf5, dst, src2, src1);
+ bmi2q(kNoPrefix, 0xf5, dst, src2, src1);
}
void bzhil(Register dst, Register src1, Register src2) {
- bmi2l(kNone, 0xf5, dst, src2, src1);
+ bmi2l(kNoPrefix, 0xf5, dst, src2, src1);
}
void bzhil(Register dst, Operand src1, Register src2) {
- bmi2l(kNone, 0xf5, dst, src2, src1);
+ bmi2l(kNoPrefix, 0xf5, dst, src2, src1);
}
void mulxq(Register dst1, Register dst2, Register src) {
bmi2q(kF2, 0xf6, dst1, dst2, src);
diff --git a/deps/v8/src/codegen/x64/cpu-x64.cc b/deps/v8/src/codegen/x64/cpu-x64.cc
index cce76d8c6a..7fd3635683 100644
--- a/deps/v8/src/codegen/x64/cpu-x64.cc
+++ b/deps/v8/src/codegen/x64/cpu-x64.cc
@@ -4,7 +4,7 @@
// CPU specific code for x64 independent of OS goes here.
-#if defined(__GNUC__) && !defined(__MINGW64__)
+#if defined(__GNUC__) && !defined(__MINGW64__) && !defined(GOOGLE3)
#include "src/third_party/valgrind/valgrind.h"
#endif
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index c453d78389..f7a50f786b 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -684,7 +684,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (should_abort_hard()) {
// We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NONE);
+ FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
Move(arg_reg_1, static_cast<int>(reason));
PrepareCallCFunction(1);
LoadAddress(rax, ExternalReference::abort_with_reason());
@@ -697,7 +697,7 @@ void TurboAssembler::Abort(AbortReason reason) {
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
+ FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
@@ -859,6 +859,16 @@ void TurboAssembler::Movq(Register dst, XMMRegister src) {
}
}
+void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpextrq(dst, src, imm8);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrq(dst, src, imm8);
+ }
+}
+
// Helper macro to define qfma macro-assembler. This takes care of every
// possible case of register aliasing to minimize the number of instructions.
#define QFMA(ps_or_pd) \
@@ -952,30 +962,6 @@ void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
#undef QFMOP
-void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
- // See comments in Movdqa(XMMRegister, XMMRegister).
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(dst, src);
- } else {
- movaps(dst, src);
- }
-}
-
-void TurboAssembler::Movdqa(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // Many AVX processors have separate integer/floating-point domains. Use the
- // appropriate instructions.
- vmovdqa(dst, src);
- } else {
- // On SSE, movaps is 1 byte shorter than movdqa, and has the same behavior.
- // Most SSE processors also don't have the same delay moving between integer
- // and floating-point domains.
- movaps(dst, src);
- }
-}
-
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1370,6 +1356,16 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
}
}
+void TurboAssembler::SmiToInt32(Register reg) {
+ STATIC_ASSERT(kSmiTag == 0);
+ DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
+ if (COMPRESS_POINTERS_BOOL) {
+ sarl(reg, Immediate(kSmiShift));
+ } else {
+ shrq(reg, Immediate(kSmiShift));
+ }
+}
+
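A scalar sketch of the two SmiToInt32 paths, assuming V8's usual Smi layouts (kSmiShift == 1 for 31-bit Smis under pointer compression, 32 for full 64-bit Smis); illustrative only.

#include <cstdint>

// Pointer compression: the value lives in the low 32 bits as (value << 1), so
// an arithmetic 32-bit shift recovers the sign (sarl reg, kSmiShift).
static inline int32_t SmiToInt32Compressed(uint32_t reg) {
  return static_cast<int32_t>(reg) >> 1;
}

// Full Smis: the value lives in the upper 32 bits, so a logical 64-bit shift
// by 32 moves it into the low half (shrq reg, kSmiShift).
static inline int32_t SmiToInt32Full(uint64_t reg) {
  return static_cast<int32_t>(reg >> 32);
}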
void TurboAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
@@ -1649,9 +1645,15 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
+ if (high == low) {
+ Move(dst, low);
+ Punpcklqdq(dst, dst);
+ return;
+ }
+
Move(dst, low);
movq(kScratchRegister, high);
- Pinsrq(dst, kScratchRegister, uint8_t{1});
+ Pinsrq(dst, dst, kScratchRegister, uint8_t{1});
}
// ----------------------------------------------------------------------------
@@ -1674,15 +1676,22 @@ void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
}
}
-void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
- unsigned higher_limit, Label* on_in_range,
- Label::Distance near_jump) {
+void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_LT(lower_limit, higher_limit);
if (lower_limit != 0) {
leal(kScratchRegister, Operand(value, 0u - lower_limit));
cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
} else {
cmpl(value, Immediate(higher_limit));
}
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range,
+ Label::Distance near_jump) {
+ CompareRange(value, lower_limit, higher_limit);
j(below_equal, on_in_range, near_jump);
}
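CompareRange leans on the standard unsigned-wraparound trick; a scalar equivalent, illustrative only:

#include <cstdint>

// leal(scratch, value - lower_limit) wraps around when value < lower_limit, so
// one unsigned compare against (higher_limit - lower_limit) covers both ends;
// below_equal then means "in range".
static inline bool InRange(uint32_t value, uint32_t lower, uint32_t upper) {
  return (value - lower) <= (upper - lower);
}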
@@ -2087,128 +2096,40 @@ void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
}
}
-void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
+void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+ uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
return;
}
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpextrd(dst, src, imm8);
- return;
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pextrd(dst, src, imm8);
- return;
- }
DCHECK_EQ(1, imm8);
movq(dst, src);
shrq(dst, Immediate(32));
}
namespace {
-
-template <typename Src>
-using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Src, uint8_t);
-template <typename Src>
-using NoAvxFn = void (Assembler::*)(XMMRegister, Src, uint8_t);
-
-template <typename Src>
-void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
- XMMRegister dst, XMMRegister src1, Src src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr,
- base::Optional<CpuFeature> feature = base::nullopt) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(assm, AVX);
- if (load_pc_offset) *load_pc_offset = assm->pc_offset();
- (assm->*avx)(dst, src1, src2, imm8);
- return;
- }
-
- if (dst != src1) assm->movaps(dst, src1);
- if (load_pc_offset) *load_pc_offset = assm->pc_offset();
- if (feature.has_value()) {
- DCHECK(CpuFeatures::IsSupported(*feature));
- CpuFeatureScope scope(assm, *feature);
- (assm->*noavx)(dst, src2, imm8);
- } else {
- (assm->*noavx)(dst, src2, imm8);
- }
-}
-} // namespace
-
-void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8, uint32_t* load_pc_offset) {
- PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, load_pc_offset, {SSE4_1});
-}
-
-void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8, uint32_t* load_pc_offset) {
- PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, load_pc_offset, {SSE4_1});
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8, uint32_t* load_pc_offset) {
- PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8, load_pc_offset);
-}
-
-void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8, uint32_t* load_pc_offset) {
- PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8, load_pc_offset);
-}
-
-void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8, uint32_t* load_pc_offset) {
- // Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
- // only by Wasm SIMD, which requires SSE4_1 already.
- if (CpuFeatures::IsSupported(SSE4_1)) {
- PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, load_pc_offset, {SSE4_1});
- return;
- }
-
- Movd(kScratchDoubleReg, src2);
- if (load_pc_offset) *load_pc_offset = pc_offset();
+template <typename Op>
+void PinsrdPreSse41Helper(TurboAssembler* tasm, XMMRegister dst, Op src,
+ uint8_t imm8, uint32_t* load_pc_offset) {
+ tasm->Movd(kScratchDoubleReg, src);
+ if (load_pc_offset) *load_pc_offset = tasm->pc_offset();
if (imm8 == 1) {
- punpckldq(dst, kScratchDoubleReg);
+ tasm->punpckldq(dst, kScratchDoubleReg);
} else {
DCHECK_EQ(0, imm8);
- Movss(dst, kScratchDoubleReg);
- }
-}
-
-void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8, uint32_t* load_pc_offset) {
- // Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
- // only by Wasm SIMD, which requires SSE4_1 already.
- if (CpuFeatures::IsSupported(SSE4_1)) {
- PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, load_pc_offset, {SSE4_1});
- return;
- }
-
- Movd(kScratchDoubleReg, src2);
- if (load_pc_offset) *load_pc_offset = pc_offset();
- if (imm8 == 1) {
- punpckldq(dst, kScratchDoubleReg);
- } else {
- DCHECK_EQ(0, imm8);
- Movss(dst, kScratchDoubleReg);
+ tasm->Movss(dst, kScratchDoubleReg);
}
}
+} // namespace
-void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8,
- uint32_t* load_pc_offset) {
- Pinsrd(dst, dst, src2, imm8, load_pc_offset);
+void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8,
- uint32_t* load_pc_offset) {
- Pinsrd(dst, dst, src2, imm8, load_pc_offset);
+void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
}
void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
@@ -2223,46 +2144,6 @@ void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
imm8, load_pc_offset, {SSE4_1});
}
-void TurboAssembler::Absps(XMMRegister dst, XMMRegister src) {
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Andps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_float_abs_constant()));
-}
-
-void TurboAssembler::Negps(XMMRegister dst, XMMRegister src) {
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Xorps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_float_neg_constant()));
-}
-
-void TurboAssembler::Abspd(XMMRegister dst, XMMRegister src) {
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Andps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_double_abs_constant()));
-}
-
-void TurboAssembler::Negpd(XMMRegister dst, XMMRegister src) {
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Xorps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_double_neg_constant()));
-}
-
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2457,12 +2338,12 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
void MacroAssembler::CmpInstanceTypeRange(Register map,
+ Register instance_type_out,
InstanceType lower_limit,
InstanceType higher_limit) {
DCHECK_LT(lower_limit, higher_limit);
- movzxwl(kScratchRegister, FieldOperand(map, Map::kInstanceTypeOffset));
- leal(kScratchRegister, Operand(kScratchRegister, 0u - lower_limit));
- cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
+ movzxwl(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
+ CompareRange(instance_type_out, lower_limit, higher_limit);
}
void TurboAssembler::AssertNotSmi(Register object) {
@@ -2527,7 +2408,8 @@ void MacroAssembler::AssertFunction(Register object) {
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
LoadMap(object, object);
- CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
+ CmpInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
+ LAST_JS_FUNCTION_TYPE);
Pop(object);
Check(below_equal, AbortReason::kOperandIsNotAFunction);
}
@@ -2754,8 +2636,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Label regular_invoke;
// If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined values as arguments.
- cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
- j(equal, &regular_invoke, Label::kFar);
+ if (kDontAdaptArgumentsSentinel != 0) {
+ cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
+ j(equal, &regular_invoke, Label::kFar);
+ }
// If overapplication or if the actual argument count is equal to the
// formal parameter count, no need to push extra undefined values.
@@ -2808,8 +2692,8 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
bind(&stack_overflow);
{
- FrameScope frame(this,
- has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
CallRuntime(Runtime::kThrowStackOverflow);
int3(); // This should be unreachable.
}
@@ -2820,7 +2704,8 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count) {
ASM_CODE_COMMENT(this);
- FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count);
Push(expected_parameter_count);
@@ -2908,7 +2793,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
bind(&check_offset);
cmpq(bytes_scratch, Immediate(kStackPageSize));
- j(greater, &touch_next_page);
+ j(greater_equal, &touch_next_page);
subq(rsp, bytes_scratch);
}
@@ -2916,7 +2801,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
void TurboAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0);
- while (bytes > kStackPageSize) {
+ while (bytes >= kStackPageSize) {
subq(rsp, Immediate(kStackPageSize));
movb(Operand(rsp, 0), Immediate(0));
bytes -= kStackPageSize;
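The AllocateStackSpace hunks switch the probe condition from > to >=; a runnable model of the loop's intent, with a byte pointer standing in for rsp (illustrative only):

// Grow the stack one page at a time and touch each page so the OS guard page
// is always hit in order. With >=, the loop also probes when exactly one page
// remains instead of skipping over it in the final subtraction.
static inline char* AllocateWithProbes(char* sp, int bytes, int page_size) {
  while (bytes >= page_size) {
    sp -= page_size;
    *sp = 0;  // movb(Operand(rsp, 0), Immediate(0))
    bytes -= page_size;
  }
  return sp - bytes;
}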
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index ec35108aba..cf3981a255 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -61,36 +61,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
: public SharedTurboAssemblerBase<TurboAssembler> {
public:
using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
- AVX_OP(Ucomiss, ucomiss)
- AVX_OP(Ucomisd, ucomisd)
- AVX_OP(Pcmpeqb, pcmpeqb)
- AVX_OP(Pcmpeqw, pcmpeqw)
- AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP(Movlhps, movlhps)
- AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
- AVX_OP_SSE4_1(Packusdw, packusdw)
- AVX_OP_SSE4_1(Insertps, insertps)
- AVX_OP_SSE4_1(Pinsrq, pinsrq)
- AVX_OP_SSE4_1(Pextrq, pextrq)
- AVX_OP_SSE4_1(Roundss, roundss)
- AVX_OP_SSE4_1(Roundsd, roundsd)
-
-#undef AVX_OP
-
- // Define movq here instead of using AVX_OP. movq is defined using templates
- // and there is a function template `void movq(P1)`, while technically
- // impossible, will be selected when deducing the arguments for AvxHelper.
- void Movq(XMMRegister dst, Register src);
- void Movq(Register dst, XMMRegister src);
-
- void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -149,8 +119,11 @@ class V8_EXPORT_PRIVATE TurboAssembler
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
- void Movdqa(XMMRegister dst, Operand src);
- void Movdqa(XMMRegister dst, XMMRegister src);
+ // Define movq here instead of using AVX_OP. movq is defined using templates
+ // and there is a function template `void movq(P1)` which, while technically
+ // impossible, will be selected when deducing the arguments for AvxHelper.
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
void Cvtss2sd(XMMRegister dst, XMMRegister src);
void Cvtss2sd(XMMRegister dst, Operand src);
@@ -191,6 +164,28 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, Operand src);
+ void PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8);
+ void Pextrq(Register dst, XMMRegister src, int8_t imm8);
+
+ void PinsrdPreSse41(XMMRegister dst, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void PinsrdPreSse41(XMMRegister dst, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+
+ void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+
void Lzcntq(Register dst, Register src);
void Lzcntq(Register dst, Operand src);
void Lzcntl(Register dst, Register src);
@@ -357,6 +352,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
void SmiUntag(Register dst, Register src);
void SmiUntag(Register dst, Operand src);
+ // Convert smi to 32-bit value.
+ void SmiToInt32(Register reg);
+
// Loads the address of the external reference into the destination
// register.
void LoadAddress(Register destination, ExternalReference source);
@@ -424,35 +422,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Trap();
void DebugBreak();
- // Non-SSE2 instructions.
- void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
-
- void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
- uint32_t* load_pc_offset = nullptr);
-
- void Absps(XMMRegister dst, XMMRegister src);
- void Negps(XMMRegister dst, XMMRegister src);
- void Abspd(XMMRegister dst, XMMRegister src);
- void Negpd(XMMRegister dst, XMMRegister src);
-
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -781,7 +750,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Cmp(Operand dst, Handle<Object> source);
// Checks if value is in range [lower_limit, higher_limit] using a single
- // comparison.
+ // comparison. Flags CF=1 or ZF=1 indicate the value is in the range
+ // (condition below_equal).
+ void CompareRange(Register value, unsigned lower_limit,
+ unsigned higher_limit);
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range,
Label::Distance near_jump = Label::kFar);
@@ -815,7 +787,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Compare instance type ranges for a map (low and high inclusive)
// Always use unsigned comparisons: below_equal for a positive result.
- void CmpInstanceTypeRange(Register map, InstanceType low, InstanceType high);
+ void CmpInstanceTypeRange(Register map, Register instance_type_out,
+ InstanceType low, InstanceType high);
template <typename Field>
void DecodeField(Register reg) {
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 6df1da88ae..795a6cc826 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -877,7 +877,7 @@ enum MinimumCapacity {
USE_CUSTOM_MINIMUM_CAPACITY
};
-enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
+enum class GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
enum class CompactionSpaceKind {
kNone,
diff --git a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
index e57463d404..03b2fe5512 100644
--- a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
@@ -8,7 +8,7 @@
#include "src/base/platform/time.h"
#include "src/codegen/compiler.h"
#include "src/flags/flags.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index e68ced7460..2ad2c9e945 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -836,7 +836,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
    // occurring before a fast mode holder on the chain.
return Invalid();
}
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
return ComputeDataFieldAccessInfo(receiver_map, map, holder, index,
access_mode);
@@ -846,7 +846,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return Invalid();
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(kAccessor, details.kind());
return ComputeAccessorDescriptorAccessInfo(receiver_map, name, map,
holder, index, access_mode);
@@ -1130,7 +1130,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
if (details.IsReadOnly()) return Invalid();
// TODO(bmeurer): Handle transition to data constant?
- if (details.location() != kField) return Invalid();
+ if (details.location() != PropertyLocation::kField) return Invalid();
int const index = details.field_index();
Representation details_representation = details.representation();
@@ -1172,8 +1172,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
if (descriptors_field_type->IsClass()) {
unrecorded_dependencies.push_back(
dependencies()->FieldTypeDependencyOffTheRecord(
- transition_map, number,
- MakeRef<Object>(broker(), descriptors_field_type)));
+ transition_map, number, *descriptors_field_type_ref));
// Remember the field map, and try to infer a useful type.
base::Optional<MapRef> maybe_field_map =
TryMakeRef(broker(), descriptors_field_type->AsClass());
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 7bc90fd822..b70c641db8 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -767,8 +767,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
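(Editorial note, not part of the diff: this hunk starts threading separate GP and FP parameter counts through kArchPrepareCallCFunction/kArchCallCFunction. Both counts are packed into the InstructionCode as bit fields and decoded via ParamField/FPParamField. A self-contained sketch of that encoding idea follows; the field positions and widths are invented for illustration and do not match V8's actual layout.)

#include <cstdint>

using InstructionCode = uint32_t;

// Minimal stand-in for a bit-field accessor over the opcode word.
template <int kShift, int kSize>
struct Field {
  static constexpr InstructionCode encode(uint32_t v) {
    return (v & ((1u << kSize) - 1)) << kShift;
  }
  static constexpr uint32_t decode(InstructionCode code) {
    return (code >> kShift) & ((1u << kSize) - 1);
  }
};

// Hypothetical layout: 5 bits each for the GP and FP parameter counts.
using ParamField = Field<22, 5>;
using FPParamField = Field<27, 5>;

InstructionCode EncodeCallCFunction(InstructionCode opcode, int gp_params,
                                    int fp_params) {
  return opcode | ParamField::encode(gp_params) |
         FPParamField::encode(fp_params);
}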
@@ -853,13 +854,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r1);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index c0200917b9..d4e0c2c457 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -11,357 +11,362 @@ namespace compiler {
// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(ArmAdd) \
- V(ArmAnd) \
- V(ArmBic) \
- V(ArmClz) \
- V(ArmCmp) \
- V(ArmCmn) \
- V(ArmTst) \
- V(ArmTeq) \
- V(ArmOrr) \
- V(ArmEor) \
- V(ArmSub) \
- V(ArmRsb) \
- V(ArmMul) \
- V(ArmMla) \
- V(ArmMls) \
- V(ArmSmull) \
- V(ArmSmmul) \
- V(ArmSmmla) \
- V(ArmUmull) \
- V(ArmSdiv) \
- V(ArmUdiv) \
- V(ArmMov) \
- V(ArmMvn) \
- V(ArmBfc) \
- V(ArmUbfx) \
- V(ArmSbfx) \
- V(ArmSxtb) \
- V(ArmSxth) \
- V(ArmSxtab) \
- V(ArmSxtah) \
- V(ArmUxtb) \
- V(ArmUxth) \
- V(ArmUxtab) \
- V(ArmRbit) \
- V(ArmRev) \
- V(ArmUxtah) \
- V(ArmAddPair) \
- V(ArmSubPair) \
- V(ArmMulPair) \
- V(ArmLslPair) \
- V(ArmLsrPair) \
- V(ArmAsrPair) \
- V(ArmVcmpF32) \
- V(ArmVaddF32) \
- V(ArmVsubF32) \
- V(ArmVmulF32) \
- V(ArmVmlaF32) \
- V(ArmVmlsF32) \
- V(ArmVdivF32) \
- V(ArmVabsF32) \
- V(ArmVnegF32) \
- V(ArmVsqrtF32) \
- V(ArmVcmpF64) \
- V(ArmVaddF64) \
- V(ArmVsubF64) \
- V(ArmVmulF64) \
- V(ArmVmlaF64) \
- V(ArmVmlsF64) \
- V(ArmVdivF64) \
- V(ArmVmodF64) \
- V(ArmVabsF64) \
- V(ArmVnegF64) \
- V(ArmVsqrtF64) \
- V(ArmVmullLow) \
- V(ArmVmullHigh) \
- V(ArmVrintmF32) \
- V(ArmVrintmF64) \
- V(ArmVrintpF32) \
- V(ArmVrintpF64) \
- V(ArmVrintzF32) \
- V(ArmVrintzF64) \
- V(ArmVrintaF64) \
- V(ArmVrintnF32) \
- V(ArmVrintnF64) \
- V(ArmVcvtF32F64) \
- V(ArmVcvtF64F32) \
- V(ArmVcvtF32S32) \
- V(ArmVcvtF32U32) \
- V(ArmVcvtF64S32) \
- V(ArmVcvtF64U32) \
- V(ArmVcvtS32F32) \
- V(ArmVcvtU32F32) \
- V(ArmVcvtS32F64) \
- V(ArmVcvtU32F64) \
- V(ArmVmovU32F32) \
- V(ArmVmovF32U32) \
- V(ArmVmovLowU32F64) \
- V(ArmVmovLowF64U32) \
- V(ArmVmovHighU32F64) \
- V(ArmVmovHighF64U32) \
- V(ArmVmovF64U32U32) \
- V(ArmVmovU32U32F64) \
- V(ArmVldrF32) \
- V(ArmVstrF32) \
- V(ArmVldrF64) \
- V(ArmVld1F64) \
- V(ArmVstrF64) \
- V(ArmVst1F64) \
- V(ArmVld1S128) \
- V(ArmVst1S128) \
- V(ArmVcnt) \
- V(ArmVpadal) \
- V(ArmVpaddl) \
- V(ArmFloat32Max) \
- V(ArmFloat64Max) \
- V(ArmFloat32Min) \
- V(ArmFloat64Min) \
- V(ArmFloat64SilenceNaN) \
- V(ArmLdrb) \
- V(ArmLdrsb) \
- V(ArmStrb) \
- V(ArmLdrh) \
- V(ArmLdrsh) \
- V(ArmStrh) \
- V(ArmLdr) \
- V(ArmStr) \
- V(ArmPush) \
- V(ArmPoke) \
- V(ArmPeek) \
- V(ArmDmbIsh) \
- V(ArmDsbIsb) \
- V(ArmF64x2Splat) \
- V(ArmF64x2ExtractLane) \
- V(ArmF64x2ReplaceLane) \
- V(ArmF64x2Abs) \
- V(ArmF64x2Neg) \
- V(ArmF64x2Sqrt) \
- V(ArmF64x2Add) \
- V(ArmF64x2Sub) \
- V(ArmF64x2Mul) \
- V(ArmF64x2Div) \
- V(ArmF64x2Min) \
- V(ArmF64x2Max) \
- V(ArmF64x2Eq) \
- V(ArmF64x2Ne) \
- V(ArmF64x2Lt) \
- V(ArmF64x2Le) \
- V(ArmF64x2Pmin) \
- V(ArmF64x2Pmax) \
- V(ArmF64x2Ceil) \
- V(ArmF64x2Floor) \
- V(ArmF64x2Trunc) \
- V(ArmF64x2NearestInt) \
- V(ArmF64x2ConvertLowI32x4S) \
- V(ArmF64x2ConvertLowI32x4U) \
- V(ArmF64x2PromoteLowF32x4) \
- V(ArmF32x4Splat) \
- V(ArmF32x4ExtractLane) \
- V(ArmF32x4ReplaceLane) \
- V(ArmF32x4SConvertI32x4) \
- V(ArmF32x4UConvertI32x4) \
- V(ArmF32x4Abs) \
- V(ArmF32x4Neg) \
- V(ArmF32x4Sqrt) \
- V(ArmF32x4RecipApprox) \
- V(ArmF32x4RecipSqrtApprox) \
- V(ArmF32x4Add) \
- V(ArmF32x4Sub) \
- V(ArmF32x4Mul) \
- V(ArmF32x4Div) \
- V(ArmF32x4Min) \
- V(ArmF32x4Max) \
- V(ArmF32x4Eq) \
- V(ArmF32x4Ne) \
- V(ArmF32x4Lt) \
- V(ArmF32x4Le) \
- V(ArmF32x4Pmin) \
- V(ArmF32x4Pmax) \
- V(ArmF32x4DemoteF64x2Zero) \
- V(ArmI64x2SplatI32Pair) \
- V(ArmI64x2ReplaceLaneI32Pair) \
- V(ArmI64x2Abs) \
- V(ArmI64x2Neg) \
- V(ArmI64x2Shl) \
- V(ArmI64x2ShrS) \
- V(ArmI64x2Add) \
- V(ArmI64x2Sub) \
- V(ArmI64x2Mul) \
- V(ArmI64x2ShrU) \
- V(ArmI64x2BitMask) \
- V(ArmI64x2Eq) \
- V(ArmI64x2Ne) \
- V(ArmI64x2GtS) \
- V(ArmI64x2GeS) \
- V(ArmI64x2SConvertI32x4Low) \
- V(ArmI64x2SConvertI32x4High) \
- V(ArmI64x2UConvertI32x4Low) \
- V(ArmI64x2UConvertI32x4High) \
- V(ArmI32x4Splat) \
- V(ArmI32x4ExtractLane) \
- V(ArmI32x4ReplaceLane) \
- V(ArmI32x4SConvertF32x4) \
- V(ArmI32x4SConvertI16x8Low) \
- V(ArmI32x4SConvertI16x8High) \
- V(ArmI32x4Neg) \
- V(ArmI32x4Shl) \
- V(ArmI32x4ShrS) \
- V(ArmI32x4Add) \
- V(ArmI32x4Sub) \
- V(ArmI32x4Mul) \
- V(ArmI32x4MinS) \
- V(ArmI32x4MaxS) \
- V(ArmI32x4Eq) \
- V(ArmI32x4Ne) \
- V(ArmI32x4GtS) \
- V(ArmI32x4GeS) \
- V(ArmI32x4UConvertF32x4) \
- V(ArmI32x4UConvertI16x8Low) \
- V(ArmI32x4UConvertI16x8High) \
- V(ArmI32x4ShrU) \
- V(ArmI32x4MinU) \
- V(ArmI32x4MaxU) \
- V(ArmI32x4GtU) \
- V(ArmI32x4GeU) \
- V(ArmI32x4Abs) \
- V(ArmI32x4BitMask) \
- V(ArmI32x4DotI16x8S) \
- V(ArmI32x4TruncSatF64x2SZero) \
- V(ArmI32x4TruncSatF64x2UZero) \
- V(ArmI16x8Splat) \
- V(ArmI16x8ExtractLaneS) \
- V(ArmI16x8ReplaceLane) \
- V(ArmI16x8SConvertI8x16Low) \
- V(ArmI16x8SConvertI8x16High) \
- V(ArmI16x8Neg) \
- V(ArmI16x8Shl) \
- V(ArmI16x8ShrS) \
- V(ArmI16x8SConvertI32x4) \
- V(ArmI16x8Add) \
- V(ArmI16x8AddSatS) \
- V(ArmI16x8Sub) \
- V(ArmI16x8SubSatS) \
- V(ArmI16x8Mul) \
- V(ArmI16x8MinS) \
- V(ArmI16x8MaxS) \
- V(ArmI16x8Eq) \
- V(ArmI16x8Ne) \
- V(ArmI16x8GtS) \
- V(ArmI16x8GeS) \
- V(ArmI16x8ExtractLaneU) \
- V(ArmI16x8UConvertI8x16Low) \
- V(ArmI16x8UConvertI8x16High) \
- V(ArmI16x8ShrU) \
- V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSatU) \
- V(ArmI16x8SubSatU) \
- V(ArmI16x8MinU) \
- V(ArmI16x8MaxU) \
- V(ArmI16x8GtU) \
- V(ArmI16x8GeU) \
- V(ArmI16x8RoundingAverageU) \
- V(ArmI16x8Abs) \
- V(ArmI16x8BitMask) \
- V(ArmI16x8Q15MulRSatS) \
- V(ArmI8x16Splat) \
- V(ArmI8x16ExtractLaneS) \
- V(ArmI8x16ReplaceLane) \
- V(ArmI8x16Neg) \
- V(ArmI8x16Shl) \
- V(ArmI8x16ShrS) \
- V(ArmI8x16SConvertI16x8) \
- V(ArmI8x16Add) \
- V(ArmI8x16AddSatS) \
- V(ArmI8x16Sub) \
- V(ArmI8x16SubSatS) \
- V(ArmI8x16MinS) \
- V(ArmI8x16MaxS) \
- V(ArmI8x16Eq) \
- V(ArmI8x16Ne) \
- V(ArmI8x16GtS) \
- V(ArmI8x16GeS) \
- V(ArmI8x16ExtractLaneU) \
- V(ArmI8x16ShrU) \
- V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSatU) \
- V(ArmI8x16SubSatU) \
- V(ArmI8x16MinU) \
- V(ArmI8x16MaxU) \
- V(ArmI8x16GtU) \
- V(ArmI8x16GeU) \
- V(ArmI8x16RoundingAverageU) \
- V(ArmI8x16Abs) \
- V(ArmI8x16BitMask) \
- V(ArmS128Const) \
- V(ArmS128Zero) \
- V(ArmS128AllOnes) \
- V(ArmS128Dup) \
- V(ArmS128And) \
- V(ArmS128Or) \
- V(ArmS128Xor) \
- V(ArmS128Not) \
- V(ArmS128Select) \
- V(ArmS128AndNot) \
- V(ArmS32x4ZipLeft) \
- V(ArmS32x4ZipRight) \
- V(ArmS32x4UnzipLeft) \
- V(ArmS32x4UnzipRight) \
- V(ArmS32x4TransposeLeft) \
- V(ArmS32x4TransposeRight) \
- V(ArmS32x4Shuffle) \
- V(ArmS16x8ZipLeft) \
- V(ArmS16x8ZipRight) \
- V(ArmS16x8UnzipLeft) \
- V(ArmS16x8UnzipRight) \
- V(ArmS16x8TransposeLeft) \
- V(ArmS16x8TransposeRight) \
- V(ArmS8x16ZipLeft) \
- V(ArmS8x16ZipRight) \
- V(ArmS8x16UnzipLeft) \
- V(ArmS8x16UnzipRight) \
- V(ArmS8x16TransposeLeft) \
- V(ArmS8x16TransposeRight) \
- V(ArmS8x16Concat) \
- V(ArmI8x16Swizzle) \
- V(ArmI8x16Shuffle) \
- V(ArmS32x2Reverse) \
- V(ArmS16x4Reverse) \
- V(ArmS16x2Reverse) \
- V(ArmS8x8Reverse) \
- V(ArmS8x4Reverse) \
- V(ArmS8x2Reverse) \
- V(ArmI64x2AllTrue) \
- V(ArmI32x4AllTrue) \
- V(ArmI16x8AllTrue) \
- V(ArmV128AnyTrue) \
- V(ArmI8x16AllTrue) \
- V(ArmS128Load8Splat) \
- V(ArmS128Load16Splat) \
- V(ArmS128Load32Splat) \
- V(ArmS128Load64Splat) \
- V(ArmS128Load8x8S) \
- V(ArmS128Load8x8U) \
- V(ArmS128Load16x4S) \
- V(ArmS128Load16x4U) \
- V(ArmS128Load32x2S) \
- V(ArmS128Load32x2U) \
- V(ArmS128Load32Zero) \
- V(ArmS128Load64Zero) \
- V(ArmS128LoadLaneLow) \
- V(ArmS128LoadLaneHigh) \
- V(ArmS128StoreLaneLow) \
- V(ArmS128StoreLaneHigh) \
- V(ArmWord32AtomicPairLoad) \
- V(ArmWord32AtomicPairStore) \
- V(ArmWord32AtomicPairAdd) \
- V(ArmWord32AtomicPairSub) \
- V(ArmWord32AtomicPairAnd) \
- V(ArmWord32AtomicPairOr) \
- V(ArmWord32AtomicPairXor) \
- V(ArmWord32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmClz) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSmull) \
+ V(ArmSmmul) \
+ V(ArmSmmla) \
+ V(ArmUmull) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmSbfx) \
+ V(ArmSxtb) \
+ V(ArmSxth) \
+ V(ArmSxtab) \
+ V(ArmSxtah) \
+ V(ArmUxtb) \
+ V(ArmUxth) \
+ V(ArmUxtab) \
+ V(ArmRbit) \
+ V(ArmRev) \
+ V(ArmUxtah) \
+ V(ArmAddPair) \
+ V(ArmSubPair) \
+ V(ArmMulPair) \
+ V(ArmLslPair) \
+ V(ArmLsrPair) \
+ V(ArmAsrPair) \
+ V(ArmVcmpF32) \
+ V(ArmVaddF32) \
+ V(ArmVsubF32) \
+ V(ArmVmulF32) \
+ V(ArmVmlaF32) \
+ V(ArmVmlsF32) \
+ V(ArmVdivF32) \
+ V(ArmVabsF32) \
+ V(ArmVnegF32) \
+ V(ArmVsqrtF32) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVabsF64) \
+ V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
+ V(ArmVmullLow) \
+ V(ArmVmullHigh) \
+ V(ArmVrintmF32) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
+ V(ArmVcvtF32F64) \
+ V(ArmVcvtF64F32) \
+ V(ArmVcvtF32S32) \
+ V(ArmVcvtF32U32) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F32) \
+ V(ArmVcvtU32F32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
+ V(ArmVmovU32U32F64) \
+ V(ArmVldrF32) \
+ V(ArmVstrF32) \
+ V(ArmVldrF64) \
+ V(ArmVld1F64) \
+ V(ArmVstrF64) \
+ V(ArmVst1F64) \
+ V(ArmVld1S128) \
+ V(ArmVst1S128) \
+ V(ArmVcnt) \
+ V(ArmVpadal) \
+ V(ArmVpaddl) \
+ V(ArmFloat32Max) \
+ V(ArmFloat64Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
+ V(ArmLdrb) \
+ V(ArmLdrsb) \
+ V(ArmStrb) \
+ V(ArmLdrh) \
+ V(ArmLdrsh) \
+ V(ArmStrh) \
+ V(ArmLdr) \
+ V(ArmStr) \
+ V(ArmPush) \
+ V(ArmPoke) \
+ V(ArmPeek) \
+ V(ArmDmbIsh) \
+ V(ArmDsbIsb) \
+ V(ArmF64x2Splat) \
+ V(ArmF64x2ExtractLane) \
+ V(ArmF64x2ReplaceLane) \
+ V(ArmF64x2Abs) \
+ V(ArmF64x2Neg) \
+ V(ArmF64x2Sqrt) \
+ V(ArmF64x2Add) \
+ V(ArmF64x2Sub) \
+ V(ArmF64x2Mul) \
+ V(ArmF64x2Div) \
+ V(ArmF64x2Min) \
+ V(ArmF64x2Max) \
+ V(ArmF64x2Eq) \
+ V(ArmF64x2Ne) \
+ V(ArmF64x2Lt) \
+ V(ArmF64x2Le) \
+ V(ArmF64x2Pmin) \
+ V(ArmF64x2Pmax) \
+ V(ArmF64x2Ceil) \
+ V(ArmF64x2Floor) \
+ V(ArmF64x2Trunc) \
+ V(ArmF64x2NearestInt) \
+ V(ArmF64x2ConvertLowI32x4S) \
+ V(ArmF64x2ConvertLowI32x4U) \
+ V(ArmF64x2PromoteLowF32x4) \
+ V(ArmF32x4Splat) \
+ V(ArmF32x4ExtractLane) \
+ V(ArmF32x4ReplaceLane) \
+ V(ArmF32x4SConvertI32x4) \
+ V(ArmF32x4UConvertI32x4) \
+ V(ArmF32x4Abs) \
+ V(ArmF32x4Neg) \
+ V(ArmF32x4Sqrt) \
+ V(ArmF32x4RecipApprox) \
+ V(ArmF32x4RecipSqrtApprox) \
+ V(ArmF32x4Add) \
+ V(ArmF32x4Sub) \
+ V(ArmF32x4Mul) \
+ V(ArmF32x4Div) \
+ V(ArmF32x4Min) \
+ V(ArmF32x4Max) \
+ V(ArmF32x4Eq) \
+ V(ArmF32x4Ne) \
+ V(ArmF32x4Lt) \
+ V(ArmF32x4Le) \
+ V(ArmF32x4Pmin) \
+ V(ArmF32x4Pmax) \
+ V(ArmF32x4DemoteF64x2Zero) \
+ V(ArmI64x2SplatI32Pair) \
+ V(ArmI64x2ReplaceLaneI32Pair) \
+ V(ArmI64x2Abs) \
+ V(ArmI64x2Neg) \
+ V(ArmI64x2Shl) \
+ V(ArmI64x2ShrS) \
+ V(ArmI64x2Add) \
+ V(ArmI64x2Sub) \
+ V(ArmI64x2Mul) \
+ V(ArmI64x2ShrU) \
+ V(ArmI64x2BitMask) \
+ V(ArmI64x2Eq) \
+ V(ArmI64x2Ne) \
+ V(ArmI64x2GtS) \
+ V(ArmI64x2GeS) \
+ V(ArmI64x2SConvertI32x4Low) \
+ V(ArmI64x2SConvertI32x4High) \
+ V(ArmI64x2UConvertI32x4Low) \
+ V(ArmI64x2UConvertI32x4High) \
+ V(ArmI32x4Splat) \
+ V(ArmI32x4ExtractLane) \
+ V(ArmI32x4ReplaceLane) \
+ V(ArmI32x4SConvertF32x4) \
+ V(ArmI32x4SConvertI16x8Low) \
+ V(ArmI32x4SConvertI16x8High) \
+ V(ArmI32x4Neg) \
+ V(ArmI32x4Shl) \
+ V(ArmI32x4ShrS) \
+ V(ArmI32x4Add) \
+ V(ArmI32x4Sub) \
+ V(ArmI32x4Mul) \
+ V(ArmI32x4MinS) \
+ V(ArmI32x4MaxS) \
+ V(ArmI32x4Eq) \
+ V(ArmI32x4Ne) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
+ V(ArmI32x4UConvertF32x4) \
+ V(ArmI32x4UConvertI16x8Low) \
+ V(ArmI32x4UConvertI16x8High) \
+ V(ArmI32x4ShrU) \
+ V(ArmI32x4MinU) \
+ V(ArmI32x4MaxU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
+ V(ArmI32x4Abs) \
+ V(ArmI32x4BitMask) \
+ V(ArmI32x4DotI16x8S) \
+ V(ArmI32x4TruncSatF64x2SZero) \
+ V(ArmI32x4TruncSatF64x2UZero) \
+ V(ArmI16x8Splat) \
+ V(ArmI16x8ExtractLaneS) \
+ V(ArmI16x8ReplaceLane) \
+ V(ArmI16x8SConvertI8x16Low) \
+ V(ArmI16x8SConvertI8x16High) \
+ V(ArmI16x8Neg) \
+ V(ArmI16x8Shl) \
+ V(ArmI16x8ShrS) \
+ V(ArmI16x8SConvertI32x4) \
+ V(ArmI16x8Add) \
+ V(ArmI16x8AddSatS) \
+ V(ArmI16x8Sub) \
+ V(ArmI16x8SubSatS) \
+ V(ArmI16x8Mul) \
+ V(ArmI16x8MinS) \
+ V(ArmI16x8MaxS) \
+ V(ArmI16x8Eq) \
+ V(ArmI16x8Ne) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
+ V(ArmI16x8ExtractLaneU) \
+ V(ArmI16x8UConvertI8x16Low) \
+ V(ArmI16x8UConvertI8x16High) \
+ V(ArmI16x8ShrU) \
+ V(ArmI16x8UConvertI32x4) \
+ V(ArmI16x8AddSatU) \
+ V(ArmI16x8SubSatU) \
+ V(ArmI16x8MinU) \
+ V(ArmI16x8MaxU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
+ V(ArmI16x8RoundingAverageU) \
+ V(ArmI16x8Abs) \
+ V(ArmI16x8BitMask) \
+ V(ArmI16x8Q15MulRSatS) \
+ V(ArmI8x16Splat) \
+ V(ArmI8x16ExtractLaneS) \
+ V(ArmI8x16ReplaceLane) \
+ V(ArmI8x16Neg) \
+ V(ArmI8x16Shl) \
+ V(ArmI8x16ShrS) \
+ V(ArmI8x16SConvertI16x8) \
+ V(ArmI8x16Add) \
+ V(ArmI8x16AddSatS) \
+ V(ArmI8x16Sub) \
+ V(ArmI8x16SubSatS) \
+ V(ArmI8x16MinS) \
+ V(ArmI8x16MaxS) \
+ V(ArmI8x16Eq) \
+ V(ArmI8x16Ne) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
+ V(ArmI8x16ExtractLaneU) \
+ V(ArmI8x16ShrU) \
+ V(ArmI8x16UConvertI16x8) \
+ V(ArmI8x16AddSatU) \
+ V(ArmI8x16SubSatU) \
+ V(ArmI8x16MinU) \
+ V(ArmI8x16MaxU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
+ V(ArmI8x16RoundingAverageU) \
+ V(ArmI8x16Abs) \
+ V(ArmI8x16BitMask) \
+ V(ArmS128Const) \
+ V(ArmS128Zero) \
+ V(ArmS128AllOnes) \
+ V(ArmS128Dup) \
+ V(ArmS128And) \
+ V(ArmS128Or) \
+ V(ArmS128Xor) \
+ V(ArmS128Not) \
+ V(ArmS128Select) \
+ V(ArmS128AndNot) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmI8x16Swizzle) \
+ V(ArmI8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
+ V(ArmI64x2AllTrue) \
+ V(ArmI32x4AllTrue) \
+ V(ArmI16x8AllTrue) \
+ V(ArmV128AnyTrue) \
+ V(ArmI8x16AllTrue) \
+ V(ArmS128Load8Splat) \
+ V(ArmS128Load16Splat) \
+ V(ArmS128Load32Splat) \
+ V(ArmS128Load64Splat) \
+ V(ArmS128Load8x8S) \
+ V(ArmS128Load8x8U) \
+ V(ArmS128Load16x4S) \
+ V(ArmS128Load16x4U) \
+ V(ArmS128Load32x2S) \
+ V(ArmS128Load32x2U) \
+ V(ArmS128Load32Zero) \
+ V(ArmS128Load64Zero) \
+ V(ArmS128LoadLaneLow) \
+ V(ArmS128LoadLaneHigh) \
+ V(ArmS128StoreLaneLow) \
+ V(ArmS128StoreLaneHigh) \
+ V(ArmWord32AtomicPairLoad) \
+ V(ArmWord32AtomicPairStore) \
+ V(ArmWord32AtomicPairAdd) \
+ V(ArmWord32AtomicPairSub) \
+ V(ArmWord32AtomicPairAnd) \
+ V(ArmWord32AtomicPairOr) \
+ V(ArmWord32AtomicPairXor) \
+ V(ArmWord32AtomicPairExchange) \
V(ArmWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
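(Editorial note, not part of the diff: the rewritten list above is an X-macro. Splitting out TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST, which is empty on ARM, lets shared code enumerate only the opcodes that may carry a MemoryAccessMode, while TARGET_ARCH_OPCODE_LIST still expands to the full set. A tiny self-contained sketch of the pattern, with hypothetical opcode names:)

// Opcodes that support some extra mode.
#define SPECIAL_OPCODE_LIST(V) \
  V(LoadWithTrap)              \
  V(StoreWithTrap)

// The full opcode list includes the special ones plus the rest.
#define ALL_OPCODE_LIST(V) \
  SPECIAL_OPCODE_LIST(V)   \
  V(Add)                   \
  V(Sub)

enum Opcode {
#define DECLARE_OPCODE(Name) k##Name,
  ALL_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};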
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 3de9b2aab6..d0511ae62b 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -498,9 +498,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
void InstructionSelector::VisitStoreLane(Node* node) {
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index fcab0a739b..d04bcf245c 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -821,7 +821,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
@@ -832,10 +833,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters, 0);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters, 0);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters);
}
__ Bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -871,16 +872,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK_EQ(i.InputRegister(0), x1);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
- __ Debug("kArchAbortCSAAssert", 0, BREAK);
+ __ Debug("kArchAbortCSADcheck", 0, BREAK);
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
@@ -2077,6 +2078,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(f)); \
break; \
}
+#define SIMD_FCM_L_CASE(Op, ImmOp, RegOp) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ if (instr->InputCount() == 1) { \
+ __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), +0.0); \
+ } else { \
+ __ Fcm##RegOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(1).Format(f), \
+ i.InputSimd128Register(0).Format(f)); \
+ } \
+ break; \
+ }
+#define SIMD_FCM_G_CASE(Op, ImmOp) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ /* Currently Gt/Ge instructions are only used with zero */ \
+ DCHECK_EQ(instr->InputCount(), 1); \
+ __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), +0.0); \
+ break; \
+ }
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
case Op: { \
VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
@@ -2192,29 +2215,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).Format(f), 0);
break;
}
- SIMD_BINOP_LANE_SIZE_CASE(kArm64FEq, Fcmeq);
+ SIMD_FCM_L_CASE(kArm64FEq, eq, eq);
case kArm64FNe: {
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VRegister dst = i.OutputSimd128Register().Format(f);
- __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
- i.InputSimd128Register(1).Format(f));
+ if (instr->InputCount() == 1) {
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f), +0.0);
+ } else {
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
+ }
__ Mvn(dst, dst);
break;
}
- case kArm64FLt: {
- VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
- __ Fcmgt(i.OutputSimd128Register().Format(f),
- i.InputSimd128Register(1).Format(f),
- i.InputSimd128Register(0).Format(f));
- break;
- }
- case kArm64FLe: {
- VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
- __ Fcmge(i.OutputSimd128Register().Format(f),
- i.InputSimd128Register(1).Format(f),
- i.InputSimd128Register(0).Format(f));
- break;
- }
+ SIMD_FCM_L_CASE(kArm64FLt, lt, gt);
+ SIMD_FCM_L_CASE(kArm64FLe, le, ge);
+ SIMD_FCM_G_CASE(kArm64FGt, gt);
+ SIMD_FCM_G_CASE(kArm64FGe, ge);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
case kArm64F64x2Pmin: {
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index d57203639e..d8ee809918 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -11,337 +11,344 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Arm64Add) \
- V(Arm64Add32) \
- V(Arm64And) \
- V(Arm64And32) \
- V(Arm64Bic) \
- V(Arm64Bic32) \
- V(Arm64Clz) \
- V(Arm64Clz32) \
- V(Arm64Cmp) \
- V(Arm64Cmp32) \
- V(Arm64Cmn) \
- V(Arm64Cmn32) \
- V(Arm64Cnt) \
- V(Arm64Cnt32) \
- V(Arm64Cnt64) \
- V(Arm64Tst) \
- V(Arm64Tst32) \
- V(Arm64Or) \
- V(Arm64Or32) \
- V(Arm64Orn) \
- V(Arm64Orn32) \
- V(Arm64Eor) \
- V(Arm64Eor32) \
- V(Arm64Eon) \
- V(Arm64Eon32) \
- V(Arm64Sadalp) \
- V(Arm64Saddlp) \
- V(Arm64Sub) \
- V(Arm64Sub32) \
- V(Arm64Mul) \
- V(Arm64Mul32) \
- V(Arm64Smlal) \
- V(Arm64Smlal2) \
- V(Arm64Smull) \
- V(Arm64Smull2) \
- V(Arm64Uadalp) \
- V(Arm64Uaddlp) \
- V(Arm64Umlal) \
- V(Arm64Umlal2) \
- V(Arm64Umull) \
- V(Arm64Umull2) \
- V(Arm64Madd) \
- V(Arm64Madd32) \
- V(Arm64Msub) \
- V(Arm64Msub32) \
- V(Arm64Mneg) \
- V(Arm64Mneg32) \
- V(Arm64Idiv) \
- V(Arm64Idiv32) \
- V(Arm64Udiv) \
- V(Arm64Udiv32) \
- V(Arm64Imod) \
- V(Arm64Imod32) \
- V(Arm64Umod) \
- V(Arm64Umod32) \
- V(Arm64Not) \
- V(Arm64Not32) \
- V(Arm64Lsl) \
- V(Arm64Lsl32) \
- V(Arm64Lsr) \
- V(Arm64Lsr32) \
- V(Arm64Asr) \
- V(Arm64Asr32) \
- V(Arm64Ror) \
- V(Arm64Ror32) \
- V(Arm64Mov32) \
- V(Arm64Sxtb32) \
- V(Arm64Sxth32) \
- V(Arm64Sxtb) \
- V(Arm64Sxth) \
- V(Arm64Sxtw) \
- V(Arm64Sbfx) \
- V(Arm64Sbfx32) \
- V(Arm64Ubfx) \
- V(Arm64Ubfx32) \
- V(Arm64Ubfiz32) \
- V(Arm64Bfi) \
- V(Arm64Rbit) \
- V(Arm64Rbit32) \
- V(Arm64Rev) \
- V(Arm64Rev32) \
- V(Arm64TestAndBranch32) \
- V(Arm64TestAndBranch) \
- V(Arm64CompareAndBranch32) \
- V(Arm64CompareAndBranch) \
- V(Arm64Claim) \
- V(Arm64Poke) \
- V(Arm64PokePair) \
- V(Arm64Peek) \
- V(Arm64Float32Cmp) \
- V(Arm64Float32Add) \
- V(Arm64Float32Sub) \
- V(Arm64Float32Mul) \
- V(Arm64Float32Div) \
- V(Arm64Float32Abs) \
- V(Arm64Float32Abd) \
- V(Arm64Float32Neg) \
- V(Arm64Float32Sqrt) \
- V(Arm64Float32Fnmul) \
- V(Arm64Float32RoundDown) \
- V(Arm64Float32Max) \
- V(Arm64Float32Min) \
- V(Arm64Float64Cmp) \
- V(Arm64Float64Add) \
- V(Arm64Float64Sub) \
- V(Arm64Float64Mul) \
- V(Arm64Float64Div) \
- V(Arm64Float64Mod) \
- V(Arm64Float64Max) \
- V(Arm64Float64Min) \
- V(Arm64Float64Abs) \
- V(Arm64Float64Abd) \
- V(Arm64Float64Neg) \
- V(Arm64Float64Sqrt) \
- V(Arm64Float64Fnmul) \
- V(Arm64Float64RoundDown) \
- V(Arm64Float32RoundUp) \
- V(Arm64Float64RoundUp) \
- V(Arm64Float64RoundTiesAway) \
- V(Arm64Float32RoundTruncate) \
- V(Arm64Float64RoundTruncate) \
- V(Arm64Float32RoundTiesEven) \
- V(Arm64Float64RoundTiesEven) \
- V(Arm64Float64SilenceNaN) \
- V(Arm64Float32ToFloat64) \
- V(Arm64Float64ToFloat32) \
- V(Arm64Float32ToInt32) \
- V(Arm64Float64ToInt32) \
- V(Arm64Float32ToUint32) \
- V(Arm64Float64ToUint32) \
- V(Arm64Float32ToInt64) \
- V(Arm64Float64ToInt64) \
- V(Arm64Float32ToUint64) \
- V(Arm64Float64ToUint64) \
- V(Arm64Int32ToFloat32) \
- V(Arm64Int32ToFloat64) \
- V(Arm64Int64ToFloat32) \
- V(Arm64Int64ToFloat64) \
- V(Arm64Uint32ToFloat32) \
- V(Arm64Uint32ToFloat64) \
- V(Arm64Uint64ToFloat32) \
- V(Arm64Uint64ToFloat64) \
- V(Arm64Float64ExtractLowWord32) \
- V(Arm64Float64ExtractHighWord32) \
- V(Arm64Float64InsertLowWord32) \
- V(Arm64Float64InsertHighWord32) \
- V(Arm64Float64MoveU64) \
- V(Arm64U64MoveFloat64) \
- V(Arm64LdrS) \
- V(Arm64StrS) \
- V(Arm64LdrD) \
- V(Arm64StrD) \
- V(Arm64LdrQ) \
- V(Arm64StrQ) \
- V(Arm64Ldrb) \
- V(Arm64Ldrsb) \
- V(Arm64LdrsbW) \
- V(Arm64Strb) \
- V(Arm64Ldrh) \
- V(Arm64Ldrsh) \
- V(Arm64LdrshW) \
- V(Arm64Strh) \
- V(Arm64Ldrsw) \
- V(Arm64LdrW) \
- V(Arm64StrW) \
- V(Arm64Ldr) \
- V(Arm64LdrDecompressTaggedSigned) \
- V(Arm64LdrDecompressTaggedPointer) \
- V(Arm64LdrDecompressAnyTagged) \
- V(Arm64LdarDecompressTaggedSigned) \
- V(Arm64LdarDecompressTaggedPointer) \
- V(Arm64LdarDecompressAnyTagged) \
- V(Arm64Str) \
- V(Arm64StrCompressTagged) \
- V(Arm64StlrCompressTagged) \
- V(Arm64DmbIsh) \
- V(Arm64DsbIsb) \
- V(Arm64Sxtl) \
- V(Arm64Sxtl2) \
- V(Arm64Uxtl) \
- V(Arm64Uxtl2) \
- V(Arm64FSplat) \
- V(Arm64FAbs) \
- V(Arm64FSqrt) \
- V(Arm64FNeg) \
- V(Arm64FExtractLane) \
- V(Arm64FReplaceLane) \
- V(Arm64FAdd) \
- V(Arm64FSub) \
- V(Arm64FMul) \
- V(Arm64FMulElement) \
- V(Arm64FDiv) \
- V(Arm64FMin) \
- V(Arm64FMax) \
- V(Arm64FEq) \
- V(Arm64FNe) \
- V(Arm64FLt) \
- V(Arm64FLe) \
- V(Arm64F64x2Qfma) \
- V(Arm64F64x2Qfms) \
- V(Arm64F64x2Pmin) \
- V(Arm64F64x2Pmax) \
- V(Arm64F64x2ConvertLowI32x4S) \
- V(Arm64F64x2ConvertLowI32x4U) \
- V(Arm64F64x2PromoteLowF32x4) \
- V(Arm64F32x4SConvertI32x4) \
- V(Arm64F32x4UConvertI32x4) \
- V(Arm64F32x4RecipApprox) \
- V(Arm64F32x4RecipSqrtApprox) \
- V(Arm64F32x4Qfma) \
- V(Arm64F32x4Qfms) \
- V(Arm64F32x4Pmin) \
- V(Arm64F32x4Pmax) \
- V(Arm64F32x4DemoteF64x2Zero) \
- V(Arm64ISplat) \
- V(Arm64IAbs) \
- V(Arm64INeg) \
- V(Arm64IExtractLane) \
- V(Arm64IReplaceLane) \
- V(Arm64I64x2Shl) \
- V(Arm64I64x2ShrS) \
- V(Arm64IAdd) \
- V(Arm64ISub) \
- V(Arm64I64x2Mul) \
- V(Arm64IEq) \
- V(Arm64INe) \
- V(Arm64IGtS) \
- V(Arm64IGeS) \
- V(Arm64I64x2ShrU) \
- V(Arm64I64x2BitMask) \
- V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4Shl) \
- V(Arm64I32x4ShrS) \
- V(Arm64I32x4Mul) \
- V(Arm64Mla) \
- V(Arm64Mls) \
- V(Arm64IMinS) \
- V(Arm64IMaxS) \
- V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4ShrU) \
- V(Arm64IMinU) \
- V(Arm64IMaxU) \
- V(Arm64IGtU) \
- V(Arm64IGeU) \
- V(Arm64I32x4BitMask) \
- V(Arm64I32x4DotI16x8S) \
- V(Arm64I32x4TruncSatF64x2SZero) \
- V(Arm64I32x4TruncSatF64x2UZero) \
- V(Arm64IExtractLaneU) \
- V(Arm64IExtractLaneS) \
- V(Arm64I16x8Shl) \
- V(Arm64I16x8ShrS) \
- V(Arm64I16x8SConvertI32x4) \
- V(Arm64IAddSatS) \
- V(Arm64ISubSatS) \
- V(Arm64I16x8Mul) \
- V(Arm64I16x8ShrU) \
- V(Arm64I16x8UConvertI32x4) \
- V(Arm64IAddSatU) \
- V(Arm64ISubSatU) \
- V(Arm64RoundingAverageU) \
- V(Arm64I16x8Q15MulRSatS) \
- V(Arm64I16x8BitMask) \
- V(Arm64I8x16Shl) \
- V(Arm64I8x16ShrS) \
- V(Arm64I8x16SConvertI16x8) \
- V(Arm64I8x16ShrU) \
- V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16BitMask) \
- V(Arm64S128Const) \
- V(Arm64S128Zero) \
- V(Arm64S128Dup) \
- V(Arm64S128And) \
- V(Arm64S128Or) \
- V(Arm64S128Xor) \
- V(Arm64S128Not) \
- V(Arm64S128Select) \
- V(Arm64S128AndNot) \
- V(Arm64Ssra) \
- V(Arm64Usra) \
- V(Arm64S32x4ZipLeft) \
- V(Arm64S32x4ZipRight) \
- V(Arm64S32x4UnzipLeft) \
- V(Arm64S32x4UnzipRight) \
- V(Arm64S32x4TransposeLeft) \
- V(Arm64S32x4TransposeRight) \
- V(Arm64S32x4Shuffle) \
- V(Arm64S16x8ZipLeft) \
- V(Arm64S16x8ZipRight) \
- V(Arm64S16x8UnzipLeft) \
- V(Arm64S16x8UnzipRight) \
- V(Arm64S16x8TransposeLeft) \
- V(Arm64S16x8TransposeRight) \
- V(Arm64S8x16ZipLeft) \
- V(Arm64S8x16ZipRight) \
- V(Arm64S8x16UnzipLeft) \
- V(Arm64S8x16UnzipRight) \
- V(Arm64S8x16TransposeLeft) \
- V(Arm64S8x16TransposeRight) \
- V(Arm64S8x16Concat) \
- V(Arm64I8x16Swizzle) \
- V(Arm64I8x16Shuffle) \
- V(Arm64S32x2Reverse) \
- V(Arm64S16x4Reverse) \
- V(Arm64S16x2Reverse) \
- V(Arm64S8x8Reverse) \
- V(Arm64S8x4Reverse) \
- V(Arm64S8x2Reverse) \
- V(Arm64V128AnyTrue) \
- V(Arm64I64x2AllTrue) \
- V(Arm64I32x4AllTrue) \
- V(Arm64I16x8AllTrue) \
- V(Arm64I8x16AllTrue) \
- V(Arm64LoadSplat) \
- V(Arm64LoadLane) \
- V(Arm64StoreLane) \
- V(Arm64S128Load8x8S) \
- V(Arm64S128Load8x8U) \
- V(Arm64S128Load16x4S) \
- V(Arm64S128Load16x4U) \
- V(Arm64S128Load32x2S) \
- V(Arm64S128Load32x2U) \
- V(Arm64Word64AtomicLoadUint64) \
- V(Arm64Word64AtomicStoreWord64) \
- V(Arm64Word64AtomicAddUint64) \
- V(Arm64Word64AtomicSubUint64) \
- V(Arm64Word64AtomicAndUint64) \
- V(Arm64Word64AtomicOrUint64) \
- V(Arm64Word64AtomicXorUint64) \
- V(Arm64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Arm64Ldr) \
+ V(Arm64Ldrb) \
+ V(Arm64LdrD) \
+ V(Arm64Ldrh) \
+ V(Arm64LdrQ) \
+ V(Arm64LdrS) \
+ V(Arm64Ldrsb) \
+ V(Arm64LdrsbW) \
+ V(Arm64Ldrsh) \
+ V(Arm64LdrshW) \
+ V(Arm64Ldrsw) \
+ V(Arm64LdrW) \
+ V(Arm64LoadLane) \
+ V(Arm64LoadSplat) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64StoreLane) \
+ V(Arm64Str) \
+ V(Arm64Strb) \
+ V(Arm64StrD) \
+ V(Arm64Strh) \
+ V(Arm64StrQ) \
+ V(Arm64StrS) \
+ V(Arm64StrW)
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Bic) \
+ V(Arm64Bic32) \
+ V(Arm64Clz) \
+ V(Arm64Clz32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Cmn) \
+ V(Arm64Cmn32) \
+ V(Arm64Cnt) \
+ V(Arm64Cnt32) \
+ V(Arm64Cnt64) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Orn) \
+ V(Arm64Orn32) \
+ V(Arm64Eor) \
+ V(Arm64Eor32) \
+ V(Arm64Eon) \
+ V(Arm64Eon32) \
+ V(Arm64Sadalp) \
+ V(Arm64Saddlp) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Smlal) \
+ V(Arm64Smlal2) \
+ V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uadalp) \
+ V(Arm64Uaddlp) \
+ V(Arm64Umlal) \
+ V(Arm64Umlal2) \
+ V(Arm64Umull) \
+ V(Arm64Umull2) \
+ V(Arm64Madd) \
+ V(Arm64Madd32) \
+ V(Arm64Msub) \
+ V(Arm64Msub32) \
+ V(Arm64Mneg) \
+ V(Arm64Mneg32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Lsl) \
+ V(Arm64Lsl32) \
+ V(Arm64Lsr) \
+ V(Arm64Lsr32) \
+ V(Arm64Asr) \
+ V(Arm64Asr32) \
+ V(Arm64Ror) \
+ V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtb32) \
+ V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
+ V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
+ V(Arm64Sbfx32) \
+ V(Arm64Ubfx) \
+ V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
+ V(Arm64Bfi) \
+ V(Arm64Rbit) \
+ V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
+ V(Arm64TestAndBranch32) \
+ V(Arm64TestAndBranch) \
+ V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePair) \
+ V(Arm64Peek) \
+ V(Arm64Float32Cmp) \
+ V(Arm64Float32Add) \
+ V(Arm64Float32Sub) \
+ V(Arm64Float32Mul) \
+ V(Arm64Float32Div) \
+ V(Arm64Float32Abs) \
+ V(Arm64Float32Abd) \
+ V(Arm64Float32Neg) \
+ V(Arm64Float32Sqrt) \
+ V(Arm64Float32Fnmul) \
+ V(Arm64Float32RoundDown) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
+ V(Arm64Float64Abd) \
+ V(Arm64Float64Neg) \
+ V(Arm64Float64Sqrt) \
+ V(Arm64Float64Fnmul) \
+ V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
+ V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
+ V(Arm64Float32ToFloat64) \
+ V(Arm64Float64ToFloat32) \
+ V(Arm64Float32ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Float32ToUint32) \
+ V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
+ V(Arm64Int32ToFloat32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
+ V(Arm64Uint32ToFloat32) \
+ V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTaggedPointer) \
+ V(Arm64LdrDecompressAnyTagged) \
+ V(Arm64LdarDecompressTaggedSigned) \
+ V(Arm64LdarDecompressTaggedPointer) \
+ V(Arm64LdarDecompressAnyTagged) \
+ V(Arm64StrCompressTagged) \
+ V(Arm64StlrCompressTagged) \
+ V(Arm64DmbIsh) \
+ V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
+ V(Arm64FSplat) \
+ V(Arm64FAbs) \
+ V(Arm64FSqrt) \
+ V(Arm64FNeg) \
+ V(Arm64FExtractLane) \
+ V(Arm64FReplaceLane) \
+ V(Arm64FAdd) \
+ V(Arm64FSub) \
+ V(Arm64FMul) \
+ V(Arm64FMulElement) \
+ V(Arm64FDiv) \
+ V(Arm64FMin) \
+ V(Arm64FMax) \
+ V(Arm64FEq) \
+ V(Arm64FNe) \
+ V(Arm64FLt) \
+ V(Arm64FLe) \
+ V(Arm64FGt) \
+ V(Arm64FGe) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
+ V(Arm64ISplat) \
+ V(Arm64IAbs) \
+ V(Arm64INeg) \
+ V(Arm64IExtractLane) \
+ V(Arm64IReplaceLane) \
+ V(Arm64I64x2Shl) \
+ V(Arm64I64x2ShrS) \
+ V(Arm64IAdd) \
+ V(Arm64ISub) \
+ V(Arm64I64x2Mul) \
+ V(Arm64IEq) \
+ V(Arm64INe) \
+ V(Arm64IGtS) \
+ V(Arm64IGeS) \
+ V(Arm64I64x2ShrU) \
+ V(Arm64I64x2BitMask) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Mul) \
+ V(Arm64Mla) \
+ V(Arm64Mls) \
+ V(Arm64IMinS) \
+ V(Arm64IMaxS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64IMinU) \
+ V(Arm64IMaxU) \
+ V(Arm64IGtU) \
+ V(Arm64IGeU) \
+ V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
+ V(Arm64IExtractLaneU) \
+ V(Arm64IExtractLaneS) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64IAddSatS) \
+ V(Arm64ISubSatS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64IAddSatU) \
+ V(Arm64ISubSatU) \
+ V(Arm64RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
+ V(Arm64I16x8BitMask) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16BitMask) \
+ V(Arm64S128Const) \
+ V(Arm64S128Zero) \
+ V(Arm64S128Dup) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S128AndNot) \
+ V(Arm64Ssra) \
+ V(Arm64Usra) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64I8x16Swizzle) \
+ V(Arm64I8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64V128AnyTrue) \
+ V(Arm64I64x2AllTrue) \
+ V(Arm64I32x4AllTrue) \
+ V(Arm64I16x8AllTrue) \
+ V(Arm64I8x16AllTrue) \
+ V(Arm64Word64AtomicLoadUint64) \
+ V(Arm64Word64AtomicStoreWord64) \
+ V(Arm64Word64AtomicAddUint64) \
+ V(Arm64Word64AtomicSubUint64) \
+ V(Arm64Word64AtomicAndUint64) \
+ V(Arm64Word64AtomicOrUint64) \
+ V(Arm64Word64AtomicXorUint64) \
+ V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index bb16b76aaf..4d123050ec 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -170,6 +170,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64FNe:
case kArm64FLt:
case kArm64FLe:
+ case kArm64FGt:
+ case kArm64FGe:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index d102ecabb2..5dec14b998 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -579,9 +579,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -3538,19 +3538,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Add, kArm64FAdd, 64) \
V(F64x2Sub, kArm64FSub, 64) \
V(F64x2Div, kArm64FDiv, 64) \
- V(F64x2Eq, kArm64FEq, 64) \
- V(F64x2Ne, kArm64FNe, 64) \
- V(F64x2Lt, kArm64FLt, 64) \
- V(F64x2Le, kArm64FLe, 64) \
V(F32x4Min, kArm64FMin, 32) \
V(F32x4Max, kArm64FMax, 32) \
V(F32x4Add, kArm64FAdd, 32) \
V(F32x4Sub, kArm64FSub, 32) \
V(F32x4Div, kArm64FDiv, 32) \
- V(F32x4Eq, kArm64FEq, 32) \
- V(F32x4Ne, kArm64FNe, 32) \
- V(F32x4Lt, kArm64FLt, 32) \
- V(F32x4Le, kArm64FLe, 32) \
V(I64x2Sub, kArm64ISub, 64) \
V(I64x2Eq, kArm64IEq, 64) \
V(I64x2Ne, kArm64INe, 64) \
@@ -3951,6 +3943,44 @@ VISIT_SIMD_SUB(I32x4, 32)
VISIT_SIMD_SUB(I16x8, 16)
#undef VISIT_SIMD_SUB
+namespace {
+bool isSimdZero(Arm64OperandGenerator& g, Node* node) {
+ auto m = V128ConstMatcher(node);
+ if (m.HasResolvedValue()) {
+ auto imms = m.ResolvedValue().immediate();
+ return (std::all_of(imms.begin(), imms.end(), std::logical_not<uint8_t>()));
+ }
+ return node->opcode() == IrOpcode::kS128Zero;
+}
+} // namespace
+
+#define VISIT_SIMD_FCM(Type, CmOp, CmOpposite, LaneSize) \
+ void InstructionSelector::Visit##Type##CmOp(Node* node) { \
+ Arm64OperandGenerator g(this); \
+ Node* left = node->InputAt(0); \
+ Node* right = node->InputAt(1); \
+ if (isSimdZero(g, left)) { \
+ Emit(kArm64F##CmOpposite | LaneSizeField::encode(LaneSize), \
+ g.DefineAsRegister(node), g.UseRegister(right)); \
+ return; \
+ } else if (isSimdZero(g, right)) { \
+ Emit(kArm64F##CmOp | LaneSizeField::encode(LaneSize), \
+ g.DefineAsRegister(node), g.UseRegister(left)); \
+ return; \
+ } \
+ VisitRRR(this, kArm64F##CmOp | LaneSizeField::encode(LaneSize), node); \
+ }
+
+VISIT_SIMD_FCM(F64x2, Eq, Eq, 64)
+VISIT_SIMD_FCM(F64x2, Ne, Ne, 64)
+VISIT_SIMD_FCM(F64x2, Lt, Gt, 64)
+VISIT_SIMD_FCM(F64x2, Le, Ge, 64)
+VISIT_SIMD_FCM(F32x4, Eq, Eq, 32)
+VISIT_SIMD_FCM(F32x4, Ne, Ne, 32)
+VISIT_SIMD_FCM(F32x4, Lt, Gt, 32)
+VISIT_SIMD_FCM(F32x4, Le, Ge, 32)
+#undef VISIT_SIMD_FCM
+
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64S128Select, g.DefineSameAsFirst(node),
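(Editorial note, not part of the diff: VISIT_SIMD_FCM above pattern-matches a constant all-zero vector on either side of a floating-point SIMD comparison. A right-hand zero keeps the original comparison; a left-hand zero emits the opposite comparison so the zero still sits on the right. The code generator can then use ARM64's one-input Fcm forms that compare against an immediate #0.0. A scalar illustration of the operand flip, not V8 code:)

// "0.0 < x" and "x > 0.0" are the same predicate (both false for NaN), so a
// left-hand zero in an Lt node can lower to the greater-than-zero form.
bool LessThanWithZeroOnLeft(double x) { return x > 0.0; }
bool LessEqualWithZeroOnLeft(double x) { return x >= 0.0; }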
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 1cd78b4359..da6a9a81e3 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -791,8 +791,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchPrepareCallCFunction: {
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ i.TempRegister(0));
break;
}
case kArchSaveCallerRegisters: {
@@ -887,13 +889,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == edx);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ int3();
@@ -1255,79 +1257,79 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32LFence:
__ lfence();
break;
- case kSSEFloat32Cmp:
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ case kIA32Float32Cmp:
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Sqrt:
- __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float32Sqrt:
+ __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32Round: {
+ case kIA32Float32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat64Cmp:
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ case kIA32Float64Cmp:
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Max: {
+ case kIA32Float32Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
- __ movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Max: {
+ case kIA32Float64Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
- __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat32Min: {
+ case kIA32Float32Min: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());
@@ -1335,29 +1337,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
+ __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(kScratchDoubleReg, i.InputOperand(1));
- __ movmskps(i.TempRegister(0), kScratchDoubleReg);
+ __ Movss(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskps(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Min: {
+ case kIA32Float64Min: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());
@@ -1365,32 +1367,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
+ __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(kScratchDoubleReg, i.InputOperand(1));
- __ movmskpd(i.TempRegister(0), kScratchDoubleReg);
+ __ Movsd(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskpd(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Mod: {
+ case kIA32Float64Mod: {
Register tmp = i.TempRegister(1);
__ mov(tmp, esp);
__ AllocateStackSpace(kDoubleSize);
__ and_(esp, -8); // align to 8 byte boundary.
// Move values to st(0) and st(1).
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(esp, 0));
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
__ fld_d(Operand(esp, 0));
// Loop while fprem isn't done.
Label mod_loop;
@@ -1406,76 +1408,77 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(Operand(esp, 0));
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+ __ Movsd(i.OutputDoubleRegister(), Operand(esp, 0));
__ mov(esp, tmp);
break;
}
- case kSSEFloat64Sqrt:
- __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64Sqrt:
+ __ Sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ case kIA32Float64Round: {
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat32ToFloat64:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float32ToFloat64:
+ __ Cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64ToFloat32:
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64ToFloat32:
+ __ Cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32ToInt32:
- __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+ case kIA32Float32ToInt32:
+ __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
break;
- case kSSEFloat32ToUint32:
+ case kIA32Float32ToUint32:
__ Cvttss2ui(i.OutputRegister(), i.InputOperand(0),
i.TempSimd128Register(0));
break;
- case kSSEFloat64ToInt32:
- __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ case kIA32Float64ToInt32:
+ __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
- case kSSEFloat64ToUint32:
+ case kIA32Float64ToUint32:
__ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0),
i.TempSimd128Register(0));
break;
case kSSEInt32ToFloat32:
+ // Calling Cvtsi2ss (which does a xor) regresses some benchmarks.
__ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEUint32ToFloat32:
+ case kIA32Uint32ToFloat32:
__ Cvtui2ss(i.OutputDoubleRegister(), i.InputOperand(0),
i.TempRegister(0));
break;
case kSSEInt32ToFloat64:
+ // Calling Cvtsi2sd (which does a xor) regresses some benchmarks.
__ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEUint32ToFloat64:
+ case kIA32Uint32ToFloat64:
__ Cvtui2sd(i.OutputDoubleRegister(), i.InputOperand(0),
i.TempRegister(0));
break;
- case kSSEFloat64ExtractLowWord32:
+ case kIA32Float64ExtractLowWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
- case kSSEFloat64ExtractHighWord32:
+ case kIA32Float64ExtractHighWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
}
break;
- case kSSEFloat64InsertLowWord32:
+ case kIA32Float64InsertLowWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
break;
- case kSSEFloat64InsertHighWord32:
+ case kIA32Float64InsertHighWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
break;
- case kSSEFloat64LoadLowWord32:
- __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64LoadLowWord32:
+ __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kFloat32Add: {
__ Addss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1524,64 +1527,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlq(kScratchDoubleReg, byte{33});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
case kFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, byte{31});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
case kFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlq(kScratchDoubleReg, byte{1});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
case kFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, byte{63});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
- case kSSEFloat64SilenceNaN:
- __ xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+ case kIA32Float64SilenceNaN:
+ __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
ASSEMBLE_MOVX(movsx_b);
@@ -1873,7 +1840,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F64x2PromoteLowF32x4: {
- __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ if (HasAddressingMode(instr)) {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ }
break;
}
case kIA32F32x4DemoteF64x2Zero: {
@@ -2020,28 +1991,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2Mul: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister left = i.InputSimd128Register(0);
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp1 = i.TempSimd128Register(0);
- XMMRegister tmp2 = i.TempSimd128Register(1);
-
- __ Movaps(tmp1, left);
- __ Movaps(tmp2, right);
-
- // Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, byte{32});
- __ Pmuludq(tmp1, tmp1, right);
-
- // Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, byte{32});
- __ Pmuludq(tmp2, tmp2, left);
-
- __ Paddq(tmp2, tmp2, tmp1);
- __ Psllq(tmp2, tmp2, byte{32});
-
- __ Pmuludq(dst, left, right);
- __ Paddq(dst, dst, tmp2);
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.TempSimd128Register(0),
+ i.TempSimd128Register(1));
break;
}
case kIA32I64x2ShrU: {
@@ -2160,34 +2112,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addps(dst, dst, kScratchDoubleReg); // add hi and lo, may round.
break;
}
- case kIA32F32x4Abs: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, kScratchDoubleReg, byte{1});
- __ Andps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Psrld(dst, dst, byte{1});
- __ Andps(dst, src);
- }
- break;
- }
- case kIA32F32x4Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pslld(kScratchDoubleReg, kScratchDoubleReg, byte{31});
- __ Xorps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Pslld(dst, dst, byte{31});
- __ Xorps(dst, src);
- }
- break;
- }
case kIA32F32x4Sqrt: {
__ Sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -2220,76 +2144,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEF32x4Min: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform minps in both orders, merge the results, and adjust.
- __ movaps(kScratchDoubleReg, src1);
- __ minps(kScratchDoubleReg, dst);
- __ minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ cmpps(dst, kScratchDoubleReg, 3);
- __ orps(kScratchDoubleReg, dst);
- __ psrld(dst, 10);
- __ andnps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Min: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- Operand src1 = i.InputOperand(1);
- // See comment above for correction of minps.
- __ vmovups(kScratchDoubleReg, src1);
- __ vminps(kScratchDoubleReg, kScratchDoubleReg, src0);
- __ vminps(dst, src0, src1);
- __ vorps(dst, dst, kScratchDoubleReg);
- __ vcmpneqps(kScratchDoubleReg, dst, dst);
- __ vorps(dst, dst, kScratchDoubleReg);
- __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 10);
- __ vandnps(dst, kScratchDoubleReg, dst);
- break;
- }
- case kSSEF32x4Max: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The maxps instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform maxps in both orders, merge the results, and adjust.
- __ movaps(kScratchDoubleReg, src1);
- __ maxps(kScratchDoubleReg, dst);
- __ maxps(dst, src1);
- // Find discrepancies.
- __ xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload.
- __ cmpps(dst, kScratchDoubleReg, 3);
- __ psrld(dst, 10);
- __ andnps(dst, kScratchDoubleReg);
+ case kIA32F32x4Min: {
+ __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kAVXF32x4Max: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- Operand src1 = i.InputOperand(1);
- // See comment above for correction of maxps.
- __ vmovups(kScratchDoubleReg, src1);
- __ vmaxps(kScratchDoubleReg, kScratchDoubleReg, src0);
- __ vmaxps(dst, src0, src1);
- __ vxorps(dst, dst, kScratchDoubleReg);
- __ vorps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vsubps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vcmpneqps(dst, kScratchDoubleReg, kScratchDoubleReg);
- __ vpsrld(dst, dst, 10);
- __ vandnps(dst, dst, kScratchDoubleReg);
+ case kIA32F32x4Max: {
+ __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32F32x4Eq: {
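
The open-coded SSE/AVX sequences for the F32x4 min/max cases (and the scalar Abs/Neg cases above) are replaced by shared TurboAssembler helpers: F32x4Min, F32x4Max, Absps, Negps, Abspd, Negpd and I64x2Mul, each taking explicit temporaries. The orps/cmpps/psrld/andnps steps that the removed code carried exist because minps/maxps alone do not give the required lane semantics. A standalone C++ illustration of those semantics (not V8 code), assuming the Wasm definition of floating-point min:

    #include <cmath>
    #include <limits>
    // Lane-wise behaviour the F32x4Min helper has to preserve: NaNs propagate
    // (canonicalized), and -0 is ordered below +0; a plain minps guarantees
    // neither when the special value sits in its first operand.
    float wasm_f32_min(float a, float b) {
      if (std::isnan(a) || std::isnan(b))
        return std::numeric_limits<float>::quiet_NaN();
      if (a == 0.0f && b == 0.0f) return std::signbit(a) ? a : b;  // -0 < +0
      return a < b ? a : b;
    }
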
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index bb54c726aa..ca15054763 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -11,358 +11,359 @@ namespace compiler {
// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(IA32Add) \
- V(IA32And) \
- V(IA32Cmp) \
- V(IA32Cmp16) \
- V(IA32Cmp8) \
- V(IA32Test) \
- V(IA32Test16) \
- V(IA32Test8) \
- V(IA32Or) \
- V(IA32Xor) \
- V(IA32Sub) \
- V(IA32Imul) \
- V(IA32ImulHigh) \
- V(IA32UmulHigh) \
- V(IA32Idiv) \
- V(IA32Udiv) \
- V(IA32Not) \
- V(IA32Neg) \
- V(IA32Shl) \
- V(IA32Shr) \
- V(IA32Sar) \
- V(IA32AddPair) \
- V(IA32SubPair) \
- V(IA32MulPair) \
- V(IA32ShlPair) \
- V(IA32ShrPair) \
- V(IA32SarPair) \
- V(IA32Rol) \
- V(IA32Ror) \
- V(IA32Lzcnt) \
- V(IA32Tzcnt) \
- V(IA32Popcnt) \
- V(IA32Bswap) \
- V(IA32MFence) \
- V(IA32LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Mod) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEInt32ToFloat32) \
- V(SSEUint32ToFloat32) \
- V(SSEInt32ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(Float32Add) \
- V(Float32Sub) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float32Mul) \
- V(Float32Div) \
- V(Float64Mul) \
- V(Float64Div) \
- V(Float64Abs) \
- V(Float64Neg) \
- V(Float32Abs) \
- V(Float32Neg) \
- V(IA32Movsxbl) \
- V(IA32Movzxbl) \
- V(IA32Movb) \
- V(IA32Movsxwl) \
- V(IA32Movzxwl) \
- V(IA32Movw) \
- V(IA32Movl) \
- V(IA32Movss) \
- V(IA32Movsd) \
- V(IA32Movdqu) \
- V(IA32Movlps) \
- V(IA32Movhps) \
- V(IA32BitcastFI) \
- V(IA32BitcastIF) \
- V(IA32Lea) \
- V(IA32Push) \
- V(IA32Poke) \
- V(IA32Peek) \
- V(IA32F64x2Splat) \
- V(F64x2ExtractLane) \
- V(F64x2ReplaceLane) \
- V(IA32F64x2Sqrt) \
- V(IA32F64x2Add) \
- V(IA32F64x2Sub) \
- V(IA32F64x2Mul) \
- V(IA32F64x2Div) \
- V(IA32F64x2Min) \
- V(IA32F64x2Max) \
- V(IA32F64x2Eq) \
- V(IA32F64x2Ne) \
- V(IA32F64x2Lt) \
- V(IA32F64x2Le) \
- V(IA32F64x2Pmin) \
- V(IA32F64x2Pmax) \
- V(IA32F64x2Round) \
- V(IA32F64x2ConvertLowI32x4S) \
- V(IA32F64x2ConvertLowI32x4U) \
- V(IA32F64x2PromoteLowF32x4) \
- V(IA32I64x2SplatI32Pair) \
- V(IA32I64x2ReplaceLaneI32Pair) \
- V(IA32I64x2Abs) \
- V(IA32I64x2Neg) \
- V(IA32I64x2Shl) \
- V(IA32I64x2ShrS) \
- V(IA32I64x2Add) \
- V(IA32I64x2Sub) \
- V(IA32I64x2Mul) \
- V(IA32I64x2ShrU) \
- V(IA32I64x2BitMask) \
- V(IA32I64x2Eq) \
- V(IA32I64x2Ne) \
- V(IA32I64x2GtS) \
- V(IA32I64x2GeS) \
- V(IA32I64x2ExtMulLowI32x4S) \
- V(IA32I64x2ExtMulHighI32x4S) \
- V(IA32I64x2ExtMulLowI32x4U) \
- V(IA32I64x2ExtMulHighI32x4U) \
- V(IA32I64x2SConvertI32x4Low) \
- V(IA32I64x2SConvertI32x4High) \
- V(IA32I64x2UConvertI32x4Low) \
- V(IA32I64x2UConvertI32x4High) \
- V(IA32F32x4Splat) \
- V(IA32F32x4ExtractLane) \
- V(IA32Insertps) \
- V(IA32F32x4SConvertI32x4) \
- V(IA32F32x4UConvertI32x4) \
- V(IA32F32x4Abs) \
- V(IA32F32x4Neg) \
- V(IA32F32x4Sqrt) \
- V(IA32F32x4RecipApprox) \
- V(IA32F32x4RecipSqrtApprox) \
- V(IA32F32x4Add) \
- V(IA32F32x4Sub) \
- V(IA32F32x4Mul) \
- V(IA32F32x4Div) \
- V(SSEF32x4Min) \
- V(AVXF32x4Min) \
- V(SSEF32x4Max) \
- V(AVXF32x4Max) \
- V(IA32F32x4Eq) \
- V(IA32F32x4Ne) \
- V(IA32F32x4Lt) \
- V(IA32F32x4Le) \
- V(IA32F32x4Pmin) \
- V(IA32F32x4Pmax) \
- V(IA32F32x4Round) \
- V(IA32F32x4DemoteF64x2Zero) \
- V(IA32I32x4Splat) \
- V(IA32I32x4ExtractLane) \
- V(IA32I32x4SConvertF32x4) \
- V(IA32I32x4SConvertI16x8Low) \
- V(IA32I32x4SConvertI16x8High) \
- V(IA32I32x4Neg) \
- V(IA32I32x4Shl) \
- V(IA32I32x4ShrS) \
- V(IA32I32x4Add) \
- V(IA32I32x4Sub) \
- V(IA32I32x4Mul) \
- V(IA32I32x4MinS) \
- V(IA32I32x4MaxS) \
- V(IA32I32x4Eq) \
- V(IA32I32x4Ne) \
- V(IA32I32x4GtS) \
- V(IA32I32x4GeS) \
- V(SSEI32x4UConvertF32x4) \
- V(AVXI32x4UConvertF32x4) \
- V(IA32I32x4UConvertI16x8Low) \
- V(IA32I32x4UConvertI16x8High) \
- V(IA32I32x4ShrU) \
- V(IA32I32x4MinU) \
- V(IA32I32x4MaxU) \
- V(SSEI32x4GtU) \
- V(AVXI32x4GtU) \
- V(SSEI32x4GeU) \
- V(AVXI32x4GeU) \
- V(IA32I32x4Abs) \
- V(IA32I32x4BitMask) \
- V(IA32I32x4DotI16x8S) \
- V(IA32I32x4ExtMulLowI16x8S) \
- V(IA32I32x4ExtMulHighI16x8S) \
- V(IA32I32x4ExtMulLowI16x8U) \
- V(IA32I32x4ExtMulHighI16x8U) \
- V(IA32I32x4ExtAddPairwiseI16x8S) \
- V(IA32I32x4ExtAddPairwiseI16x8U) \
- V(IA32I32x4TruncSatF64x2SZero) \
- V(IA32I32x4TruncSatF64x2UZero) \
- V(IA32I16x8Splat) \
- V(IA32I16x8ExtractLaneS) \
- V(IA32I16x8SConvertI8x16Low) \
- V(IA32I16x8SConvertI8x16High) \
- V(IA32I16x8Neg) \
- V(IA32I16x8Shl) \
- V(IA32I16x8ShrS) \
- V(IA32I16x8SConvertI32x4) \
- V(IA32I16x8Add) \
- V(IA32I16x8AddSatS) \
- V(IA32I16x8Sub) \
- V(IA32I16x8SubSatS) \
- V(IA32I16x8Mul) \
- V(IA32I16x8MinS) \
- V(IA32I16x8MaxS) \
- V(IA32I16x8Eq) \
- V(SSEI16x8Ne) \
- V(AVXI16x8Ne) \
- V(IA32I16x8GtS) \
- V(SSEI16x8GeS) \
- V(AVXI16x8GeS) \
- V(IA32I16x8UConvertI8x16Low) \
- V(IA32I16x8UConvertI8x16High) \
- V(IA32I16x8ShrU) \
- V(IA32I16x8UConvertI32x4) \
- V(IA32I16x8AddSatU) \
- V(IA32I16x8SubSatU) \
- V(IA32I16x8MinU) \
- V(IA32I16x8MaxU) \
- V(SSEI16x8GtU) \
- V(AVXI16x8GtU) \
- V(SSEI16x8GeU) \
- V(AVXI16x8GeU) \
- V(IA32I16x8RoundingAverageU) \
- V(IA32I16x8Abs) \
- V(IA32I16x8BitMask) \
- V(IA32I16x8ExtMulLowI8x16S) \
- V(IA32I16x8ExtMulHighI8x16S) \
- V(IA32I16x8ExtMulLowI8x16U) \
- V(IA32I16x8ExtMulHighI8x16U) \
- V(IA32I16x8ExtAddPairwiseI8x16S) \
- V(IA32I16x8ExtAddPairwiseI8x16U) \
- V(IA32I16x8Q15MulRSatS) \
- V(IA32I8x16Splat) \
- V(IA32I8x16ExtractLaneS) \
- V(IA32Pinsrb) \
- V(IA32Pinsrw) \
- V(IA32Pinsrd) \
- V(IA32Pextrb) \
- V(IA32Pextrw) \
- V(IA32S128Store32Lane) \
- V(IA32I8x16SConvertI16x8) \
- V(IA32I8x16Neg) \
- V(IA32I8x16Shl) \
- V(IA32I8x16ShrS) \
- V(IA32I8x16Add) \
- V(IA32I8x16AddSatS) \
- V(IA32I8x16Sub) \
- V(IA32I8x16SubSatS) \
- V(IA32I8x16MinS) \
- V(IA32I8x16MaxS) \
- V(IA32I8x16Eq) \
- V(SSEI8x16Ne) \
- V(AVXI8x16Ne) \
- V(IA32I8x16GtS) \
- V(SSEI8x16GeS) \
- V(AVXI8x16GeS) \
- V(IA32I8x16UConvertI16x8) \
- V(IA32I8x16AddSatU) \
- V(IA32I8x16SubSatU) \
- V(IA32I8x16ShrU) \
- V(IA32I8x16MinU) \
- V(IA32I8x16MaxU) \
- V(SSEI8x16GtU) \
- V(AVXI8x16GtU) \
- V(SSEI8x16GeU) \
- V(AVXI8x16GeU) \
- V(IA32I8x16RoundingAverageU) \
- V(IA32I8x16Abs) \
- V(IA32I8x16BitMask) \
- V(IA32I8x16Popcnt) \
- V(IA32S128Const) \
- V(IA32S128Zero) \
- V(IA32S128AllOnes) \
- V(IA32S128Not) \
- V(IA32S128And) \
- V(IA32S128Or) \
- V(IA32S128Xor) \
- V(IA32S128Select) \
- V(IA32S128AndNot) \
- V(IA32I8x16Swizzle) \
- V(IA32I8x16Shuffle) \
- V(IA32S128Load8Splat) \
- V(IA32S128Load16Splat) \
- V(IA32S128Load32Splat) \
- V(IA32S128Load64Splat) \
- V(IA32S128Load8x8S) \
- V(IA32S128Load8x8U) \
- V(IA32S128Load16x4S) \
- V(IA32S128Load16x4U) \
- V(IA32S128Load32x2S) \
- V(IA32S128Load32x2U) \
- V(IA32S32x4Rotate) \
- V(IA32S32x4Swizzle) \
- V(IA32S32x4Shuffle) \
- V(IA32S16x8Blend) \
- V(IA32S16x8HalfShuffle1) \
- V(IA32S16x8HalfShuffle2) \
- V(IA32S8x16Alignr) \
- V(IA32S16x8Dup) \
- V(IA32S8x16Dup) \
- V(SSES16x8UnzipHigh) \
- V(AVXS16x8UnzipHigh) \
- V(SSES16x8UnzipLow) \
- V(AVXS16x8UnzipLow) \
- V(SSES8x16UnzipHigh) \
- V(AVXS8x16UnzipHigh) \
- V(SSES8x16UnzipLow) \
- V(AVXS8x16UnzipLow) \
- V(IA32S64x2UnpackHigh) \
- V(IA32S32x4UnpackHigh) \
- V(IA32S16x8UnpackHigh) \
- V(IA32S8x16UnpackHigh) \
- V(IA32S64x2UnpackLow) \
- V(IA32S32x4UnpackLow) \
- V(IA32S16x8UnpackLow) \
- V(IA32S8x16UnpackLow) \
- V(SSES8x16TransposeLow) \
- V(AVXS8x16TransposeLow) \
- V(SSES8x16TransposeHigh) \
- V(AVXS8x16TransposeHigh) \
- V(SSES8x8Reverse) \
- V(AVXS8x8Reverse) \
- V(SSES8x4Reverse) \
- V(AVXS8x4Reverse) \
- V(SSES8x2Reverse) \
- V(AVXS8x2Reverse) \
- V(IA32S128AnyTrue) \
- V(IA32I64x2AllTrue) \
- V(IA32I32x4AllTrue) \
- V(IA32I16x8AllTrue) \
- V(IA32I8x16AllTrue) \
- V(IA32Word32AtomicPairLoad) \
- V(IA32Word32ReleasePairStore) \
- V(IA32Word32SeqCstPairStore) \
- V(IA32Word32AtomicPairAdd) \
- V(IA32Word32AtomicPairSub) \
- V(IA32Word32AtomicPairAnd) \
- V(IA32Word32AtomicPairOr) \
- V(IA32Word32AtomicPairXor) \
- V(IA32Word32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Cmp16) \
+ V(IA32Cmp8) \
+ V(IA32Test) \
+ V(IA32Test16) \
+ V(IA32Test8) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32ImulHigh) \
+ V(IA32UmulHigh) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32AddPair) \
+ V(IA32SubPair) \
+ V(IA32MulPair) \
+ V(IA32ShlPair) \
+ V(IA32ShrPair) \
+ V(IA32SarPair) \
+ V(IA32Rol) \
+ V(IA32Ror) \
+ V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
+ V(IA32Bswap) \
+ V(IA32MFence) \
+ V(IA32LFence) \
+ V(IA32Float32Cmp) \
+ V(IA32Float32Sqrt) \
+ V(IA32Float32Round) \
+ V(IA32Float64Cmp) \
+ V(IA32Float64Mod) \
+ V(IA32Float32Max) \
+ V(IA32Float64Max) \
+ V(IA32Float32Min) \
+ V(IA32Float64Min) \
+ V(IA32Float64Sqrt) \
+ V(IA32Float64Round) \
+ V(IA32Float32ToFloat64) \
+ V(IA32Float64ToFloat32) \
+ V(IA32Float32ToInt32) \
+ V(IA32Float32ToUint32) \
+ V(IA32Float64ToInt32) \
+ V(IA32Float64ToUint32) \
+ V(SSEInt32ToFloat32) \
+ V(IA32Uint32ToFloat32) \
+ V(SSEInt32ToFloat64) \
+ V(IA32Uint32ToFloat64) \
+ V(IA32Float64ExtractLowWord32) \
+ V(IA32Float64ExtractHighWord32) \
+ V(IA32Float64InsertLowWord32) \
+ V(IA32Float64InsertHighWord32) \
+ V(IA32Float64LoadLowWord32) \
+ V(IA32Float64SilenceNaN) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Abs) \
+ V(Float64Neg) \
+ V(Float32Abs) \
+ V(Float32Neg) \
+ V(IA32Movsxbl) \
+ V(IA32Movzxbl) \
+ V(IA32Movb) \
+ V(IA32Movsxwl) \
+ V(IA32Movzxwl) \
+ V(IA32Movw) \
+ V(IA32Movl) \
+ V(IA32Movss) \
+ V(IA32Movsd) \
+ V(IA32Movdqu) \
+ V(IA32Movlps) \
+ V(IA32Movhps) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
+ V(IA32Lea) \
+ V(IA32Push) \
+ V(IA32Poke) \
+ V(IA32Peek) \
+ V(IA32F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(IA32F64x2Sqrt) \
+ V(IA32F64x2Add) \
+ V(IA32F64x2Sub) \
+ V(IA32F64x2Mul) \
+ V(IA32F64x2Div) \
+ V(IA32F64x2Min) \
+ V(IA32F64x2Max) \
+ V(IA32F64x2Eq) \
+ V(IA32F64x2Ne) \
+ V(IA32F64x2Lt) \
+ V(IA32F64x2Le) \
+ V(IA32F64x2Pmin) \
+ V(IA32F64x2Pmax) \
+ V(IA32F64x2Round) \
+ V(IA32F64x2ConvertLowI32x4S) \
+ V(IA32F64x2ConvertLowI32x4U) \
+ V(IA32F64x2PromoteLowF32x4) \
+ V(IA32I64x2SplatI32Pair) \
+ V(IA32I64x2ReplaceLaneI32Pair) \
+ V(IA32I64x2Abs) \
+ V(IA32I64x2Neg) \
+ V(IA32I64x2Shl) \
+ V(IA32I64x2ShrS) \
+ V(IA32I64x2Add) \
+ V(IA32I64x2Sub) \
+ V(IA32I64x2Mul) \
+ V(IA32I64x2ShrU) \
+ V(IA32I64x2BitMask) \
+ V(IA32I64x2Eq) \
+ V(IA32I64x2Ne) \
+ V(IA32I64x2GtS) \
+ V(IA32I64x2GeS) \
+ V(IA32I64x2ExtMulLowI32x4S) \
+ V(IA32I64x2ExtMulHighI32x4S) \
+ V(IA32I64x2ExtMulLowI32x4U) \
+ V(IA32I64x2ExtMulHighI32x4U) \
+ V(IA32I64x2SConvertI32x4Low) \
+ V(IA32I64x2SConvertI32x4High) \
+ V(IA32I64x2UConvertI32x4Low) \
+ V(IA32I64x2UConvertI32x4High) \
+ V(IA32F32x4Splat) \
+ V(IA32F32x4ExtractLane) \
+ V(IA32Insertps) \
+ V(IA32F32x4SConvertI32x4) \
+ V(IA32F32x4UConvertI32x4) \
+ V(IA32F32x4Sqrt) \
+ V(IA32F32x4RecipApprox) \
+ V(IA32F32x4RecipSqrtApprox) \
+ V(IA32F32x4Add) \
+ V(IA32F32x4Sub) \
+ V(IA32F32x4Mul) \
+ V(IA32F32x4Div) \
+ V(IA32F32x4Min) \
+ V(IA32F32x4Max) \
+ V(IA32F32x4Eq) \
+ V(IA32F32x4Ne) \
+ V(IA32F32x4Lt) \
+ V(IA32F32x4Le) \
+ V(IA32F32x4Pmin) \
+ V(IA32F32x4Pmax) \
+ V(IA32F32x4Round) \
+ V(IA32F32x4DemoteF64x2Zero) \
+ V(IA32I32x4Splat) \
+ V(IA32I32x4ExtractLane) \
+ V(IA32I32x4SConvertF32x4) \
+ V(IA32I32x4SConvertI16x8Low) \
+ V(IA32I32x4SConvertI16x8High) \
+ V(IA32I32x4Neg) \
+ V(IA32I32x4Shl) \
+ V(IA32I32x4ShrS) \
+ V(IA32I32x4Add) \
+ V(IA32I32x4Sub) \
+ V(IA32I32x4Mul) \
+ V(IA32I32x4MinS) \
+ V(IA32I32x4MaxS) \
+ V(IA32I32x4Eq) \
+ V(IA32I32x4Ne) \
+ V(IA32I32x4GtS) \
+ V(IA32I32x4GeS) \
+ V(SSEI32x4UConvertF32x4) \
+ V(AVXI32x4UConvertF32x4) \
+ V(IA32I32x4UConvertI16x8Low) \
+ V(IA32I32x4UConvertI16x8High) \
+ V(IA32I32x4ShrU) \
+ V(IA32I32x4MinU) \
+ V(IA32I32x4MaxU) \
+ V(SSEI32x4GtU) \
+ V(AVXI32x4GtU) \
+ V(SSEI32x4GeU) \
+ V(AVXI32x4GeU) \
+ V(IA32I32x4Abs) \
+ V(IA32I32x4BitMask) \
+ V(IA32I32x4DotI16x8S) \
+ V(IA32I32x4ExtMulLowI16x8S) \
+ V(IA32I32x4ExtMulHighI16x8S) \
+ V(IA32I32x4ExtMulLowI16x8U) \
+ V(IA32I32x4ExtMulHighI16x8U) \
+ V(IA32I32x4ExtAddPairwiseI16x8S) \
+ V(IA32I32x4ExtAddPairwiseI16x8U) \
+ V(IA32I32x4TruncSatF64x2SZero) \
+ V(IA32I32x4TruncSatF64x2UZero) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLaneS) \
+ V(IA32I16x8SConvertI8x16Low) \
+ V(IA32I16x8SConvertI8x16High) \
+ V(IA32I16x8Neg) \
+ V(IA32I16x8Shl) \
+ V(IA32I16x8ShrS) \
+ V(IA32I16x8SConvertI32x4) \
+ V(IA32I16x8Add) \
+ V(IA32I16x8AddSatS) \
+ V(IA32I16x8Sub) \
+ V(IA32I16x8SubSatS) \
+ V(IA32I16x8Mul) \
+ V(IA32I16x8MinS) \
+ V(IA32I16x8MaxS) \
+ V(IA32I16x8Eq) \
+ V(SSEI16x8Ne) \
+ V(AVXI16x8Ne) \
+ V(IA32I16x8GtS) \
+ V(SSEI16x8GeS) \
+ V(AVXI16x8GeS) \
+ V(IA32I16x8UConvertI8x16Low) \
+ V(IA32I16x8UConvertI8x16High) \
+ V(IA32I16x8ShrU) \
+ V(IA32I16x8UConvertI32x4) \
+ V(IA32I16x8AddSatU) \
+ V(IA32I16x8SubSatU) \
+ V(IA32I16x8MinU) \
+ V(IA32I16x8MaxU) \
+ V(SSEI16x8GtU) \
+ V(AVXI16x8GtU) \
+ V(SSEI16x8GeU) \
+ V(AVXI16x8GeU) \
+ V(IA32I16x8RoundingAverageU) \
+ V(IA32I16x8Abs) \
+ V(IA32I16x8BitMask) \
+ V(IA32I16x8ExtMulLowI8x16S) \
+ V(IA32I16x8ExtMulHighI8x16S) \
+ V(IA32I16x8ExtMulLowI8x16U) \
+ V(IA32I16x8ExtMulHighI8x16U) \
+ V(IA32I16x8ExtAddPairwiseI8x16S) \
+ V(IA32I16x8ExtAddPairwiseI8x16U) \
+ V(IA32I16x8Q15MulRSatS) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLaneS) \
+ V(IA32Pinsrb) \
+ V(IA32Pinsrw) \
+ V(IA32Pinsrd) \
+ V(IA32Pextrb) \
+ V(IA32Pextrw) \
+ V(IA32S128Store32Lane) \
+ V(IA32I8x16SConvertI16x8) \
+ V(IA32I8x16Neg) \
+ V(IA32I8x16Shl) \
+ V(IA32I8x16ShrS) \
+ V(IA32I8x16Add) \
+ V(IA32I8x16AddSatS) \
+ V(IA32I8x16Sub) \
+ V(IA32I8x16SubSatS) \
+ V(IA32I8x16MinS) \
+ V(IA32I8x16MaxS) \
+ V(IA32I8x16Eq) \
+ V(SSEI8x16Ne) \
+ V(AVXI8x16Ne) \
+ V(IA32I8x16GtS) \
+ V(SSEI8x16GeS) \
+ V(AVXI8x16GeS) \
+ V(IA32I8x16UConvertI16x8) \
+ V(IA32I8x16AddSatU) \
+ V(IA32I8x16SubSatU) \
+ V(IA32I8x16ShrU) \
+ V(IA32I8x16MinU) \
+ V(IA32I8x16MaxU) \
+ V(SSEI8x16GtU) \
+ V(AVXI8x16GtU) \
+ V(SSEI8x16GeU) \
+ V(AVXI8x16GeU) \
+ V(IA32I8x16RoundingAverageU) \
+ V(IA32I8x16Abs) \
+ V(IA32I8x16BitMask) \
+ V(IA32I8x16Popcnt) \
+ V(IA32S128Const) \
+ V(IA32S128Zero) \
+ V(IA32S128AllOnes) \
+ V(IA32S128Not) \
+ V(IA32S128And) \
+ V(IA32S128Or) \
+ V(IA32S128Xor) \
+ V(IA32S128Select) \
+ V(IA32S128AndNot) \
+ V(IA32I8x16Swizzle) \
+ V(IA32I8x16Shuffle) \
+ V(IA32S128Load8Splat) \
+ V(IA32S128Load16Splat) \
+ V(IA32S128Load32Splat) \
+ V(IA32S128Load64Splat) \
+ V(IA32S128Load8x8S) \
+ V(IA32S128Load8x8U) \
+ V(IA32S128Load16x4S) \
+ V(IA32S128Load16x4U) \
+ V(IA32S128Load32x2S) \
+ V(IA32S128Load32x2U) \
+ V(IA32S32x4Rotate) \
+ V(IA32S32x4Swizzle) \
+ V(IA32S32x4Shuffle) \
+ V(IA32S16x8Blend) \
+ V(IA32S16x8HalfShuffle1) \
+ V(IA32S16x8HalfShuffle2) \
+ V(IA32S8x16Alignr) \
+ V(IA32S16x8Dup) \
+ V(IA32S8x16Dup) \
+ V(SSES16x8UnzipHigh) \
+ V(AVXS16x8UnzipHigh) \
+ V(SSES16x8UnzipLow) \
+ V(AVXS16x8UnzipLow) \
+ V(SSES8x16UnzipHigh) \
+ V(AVXS8x16UnzipHigh) \
+ V(SSES8x16UnzipLow) \
+ V(AVXS8x16UnzipLow) \
+ V(IA32S64x2UnpackHigh) \
+ V(IA32S32x4UnpackHigh) \
+ V(IA32S16x8UnpackHigh) \
+ V(IA32S8x16UnpackHigh) \
+ V(IA32S64x2UnpackLow) \
+ V(IA32S32x4UnpackLow) \
+ V(IA32S16x8UnpackLow) \
+ V(IA32S8x16UnpackLow) \
+ V(SSES8x16TransposeLow) \
+ V(AVXS8x16TransposeLow) \
+ V(SSES8x16TransposeHigh) \
+ V(AVXS8x16TransposeHigh) \
+ V(SSES8x8Reverse) \
+ V(AVXS8x8Reverse) \
+ V(SSES8x4Reverse) \
+ V(AVXS8x4Reverse) \
+ V(SSES8x2Reverse) \
+ V(AVXS8x2Reverse) \
+ V(IA32S128AnyTrue) \
+ V(IA32I64x2AllTrue) \
+ V(IA32I32x4AllTrue) \
+ V(IA32I16x8AllTrue) \
+ V(IA32I8x16AllTrue) \
+ V(IA32Word32AtomicPairLoad) \
+ V(IA32Word32ReleasePairStore) \
+ V(IA32Word32SeqCstPairStore) \
+ V(IA32Word32AtomicPairAdd) \
+ V(IA32Word32AtomicPairSub) \
+ V(IA32Word32AtomicPairAnd) \
+ V(IA32Word32AtomicPairOr) \
+ V(IA32Word32AtomicPairXor) \
+ V(IA32Word32AtomicPairExchange) \
V(IA32Word32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
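
Beyond the kSSE*/kAVX* to kIA32* renames, the list is restructured: opcodes that can carry a MemoryAccessMode are split into TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST (currently empty on this port) and prepended to TARGET_ARCH_OPCODE_LIST, so that the generic HasMemoryAccessMode() predicate added in instruction-codes.h further down can be generated from it. A minimal standalone sketch of the X-macro pattern, with invented opcode names:

    // Opcodes that may carry a MemoryAccessMode are listed separately...
    #define OPCODES_WITH_MEM_MODE(V) /* none on this port */
    #define ALL_OPCODES(V) OPCODES_WITH_MEM_MODE(V) V(Add) V(Sub)

    enum ArchOpcode {
    #define DECLARE(Name) k##Name,
      ALL_OPCODES(DECLARE)
    #undef DECLARE
    };

    // ...so that a predicate can enumerate exactly those opcodes.
    inline bool HasMemoryAccessMode(ArchOpcode opcode) {
      switch (opcode) {
    #define CASE(Name) \
      case k##Name:    \
        return true;
        OPCODES_WITH_MEM_MODE(CASE)
    #undef CASE
        default:
          return false;
      }
    }
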
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 3910d45195..01e4f8faa8 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -48,33 +48,33 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Popcnt:
case kIA32Bswap:
case kIA32Lea:
- case kSSEFloat32Cmp:
- case kSSEFloat32Sqrt:
- case kSSEFloat32Round:
- case kSSEFloat64Cmp:
- case kSSEFloat64Mod:
- case kSSEFloat32Max:
- case kSSEFloat64Max:
- case kSSEFloat32Min:
- case kSSEFloat64Min:
- case kSSEFloat64Sqrt:
- case kSSEFloat64Round:
- case kSSEFloat32ToFloat64:
- case kSSEFloat64ToFloat32:
- case kSSEFloat32ToInt32:
- case kSSEFloat32ToUint32:
- case kSSEFloat64ToInt32:
- case kSSEFloat64ToUint32:
+ case kIA32Float32Cmp:
+ case kIA32Float32Sqrt:
+ case kIA32Float32Round:
+ case kIA32Float64Cmp:
+ case kIA32Float64Mod:
+ case kIA32Float32Max:
+ case kIA32Float64Max:
+ case kIA32Float32Min:
+ case kIA32Float64Min:
+ case kIA32Float64Sqrt:
+ case kIA32Float64Round:
+ case kIA32Float32ToFloat64:
+ case kIA32Float64ToFloat32:
+ case kIA32Float32ToInt32:
+ case kIA32Float32ToUint32:
+ case kIA32Float64ToInt32:
+ case kIA32Float64ToUint32:
case kSSEInt32ToFloat32:
- case kSSEUint32ToFloat32:
+ case kIA32Uint32ToFloat32:
case kSSEInt32ToFloat64:
- case kSSEUint32ToFloat64:
- case kSSEFloat64ExtractLowWord32:
- case kSSEFloat64ExtractHighWord32:
- case kSSEFloat64InsertLowWord32:
- case kSSEFloat64InsertHighWord32:
- case kSSEFloat64LoadLowWord32:
- case kSSEFloat64SilenceNaN:
+ case kIA32Uint32ToFloat64:
+ case kIA32Float64ExtractLowWord32:
+ case kIA32Float64ExtractHighWord32:
+ case kIA32Float64InsertLowWord32:
+ case kIA32Float64InsertHighWord32:
+ case kIA32Float64LoadLowWord32:
+ case kIA32Float64SilenceNaN:
case kFloat32Add:
case kFloat32Sub:
case kFloat64Add:
@@ -137,8 +137,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Insertps:
case kIA32F32x4SConvertI32x4:
case kIA32F32x4UConvertI32x4:
- case kIA32F32x4Abs:
- case kIA32F32x4Neg:
case kIA32F32x4Sqrt:
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
@@ -146,10 +144,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4Sub:
case kIA32F32x4Mul:
case kIA32F32x4Div:
- case kSSEF32x4Min:
- case kAVXF32x4Min:
- case kSSEF32x4Max:
- case kAVXF32x4Max:
+ case kIA32F32x4Min:
+ case kIA32F32x4Max:
case kIA32F32x4Eq:
case kIA32F32x4Ne:
case kIA32F32x4Lt:
@@ -406,8 +402,8 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kIA32Imul:
case kIA32ImulHigh:
return 5;
- case kSSEFloat32Cmp:
- case kSSEFloat64Cmp:
+ case kIA32Float32Cmp:
+ case kIA32Float64Cmp:
return 9;
case kFloat32Add:
case kFloat32Sub:
@@ -415,24 +411,24 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kFloat64Sub:
case kFloat32Abs:
case kFloat32Neg:
- case kSSEFloat64Max:
- case kSSEFloat64Min:
+ case kIA32Float64Max:
+ case kIA32Float64Min:
case kFloat64Abs:
case kFloat64Neg:
return 5;
case kFloat32Mul:
return 4;
- case kSSEFloat32ToFloat64:
- case kSSEFloat64ToFloat32:
+ case kIA32Float32ToFloat64:
+ case kIA32Float64ToFloat32:
return 6;
- case kSSEFloat32Round:
- case kSSEFloat64Round:
- case kSSEFloat32ToInt32:
- case kSSEFloat64ToInt32:
+ case kIA32Float32Round:
+ case kIA32Float64Round:
+ case kIA32Float32ToInt32:
+ case kIA32Float64ToInt32:
return 8;
- case kSSEFloat32ToUint32:
+ case kIA32Float32ToUint32:
return 21;
- case kSSEFloat64ToUint32:
+ case kIA32Float64ToUint32:
return 15;
case kIA32Idiv:
return 33;
@@ -442,10 +438,10 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 35;
case kFloat64Div:
return 63;
- case kSSEFloat32Sqrt:
- case kSSEFloat64Sqrt:
+ case kIA32Float32Sqrt:
+ case kIA32Float64Sqrt:
return 25;
- case kSSEFloat64Mod:
+ case kIA32Float64Mod:
return 50;
case kArchTruncateDoubleToI:
return 9;
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index ce792692f0..8c2b58564a 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -99,11 +99,14 @@ class IA32OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
return true;
+ case IrOpcode::kNumberConstant: {
+ const double value = OpParameter<double>(node->op());
+ return bit_cast<int64_t>(value) == 0;
+ }
case IrOpcode::kHeapConstant: {
// TODO(bmeurer): We must not dereference handles concurrently. If we
        // really have to do this here, then we need to find a way to put this
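
The kNumberConstant case above narrows which number constants may be used as immediates: only a value whose 64-bit pattern is all zeros, i.e. +0.0, now qualifies. A standalone illustration of the bit_cast test (IEEE 754 bit patterns, not V8 code):

    #include <cstdint>
    #include <cstring>
    // Equivalent of the bit_cast<int64_t>(value) used in CanBeImmediate().
    int64_t bits_of(double d) {
      int64_t r;
      std::memcpy(&r, &d, sizeof r);
      return r;
    }
    // bits_of(0.0)  == 0                   -> accepted as an immediate
    // bits_of(-0.0) == 0x8000000000000000  -> rejected (sign bit set)
    // bits_of(1.0)  == 0x3FF0000000000000  -> rejected
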
@@ -329,10 +332,13 @@ void VisitRROFloat(InstructionSelector* selector, Node* node,
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempRegister()};
if (selector->IsSupported(AVX)) {
- selector->Emit(opcode, g.DefineAsRegister(node), g.Use(input));
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input),
+ arraysize(temps), temps);
} else {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
+ arraysize(temps), temps);
}
}
@@ -455,9 +461,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
IA32OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
void InstructionSelector::VisitLoadLane(Node* node) {
@@ -575,7 +581,7 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
Emit(code, 1, outputs, input_count, inputs);
}
@@ -1123,53 +1129,53 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
-#define RO_OP_LIST(V) \
- V(Word32Clz, kIA32Lzcnt) \
- V(Word32Ctz, kIA32Tzcnt) \
- V(Word32Popcnt, kIA32Popcnt) \
- V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
- V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
- V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
- V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
- V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
- V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
- V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
- V(BitcastFloat32ToInt32, kIA32BitcastFI) \
- V(BitcastInt32ToFloat32, kIA32BitcastIF) \
- V(Float32Sqrt, kSSEFloat32Sqrt) \
- V(Float64Sqrt, kSSEFloat64Sqrt) \
- V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
- V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
- V(SignExtendWord8ToInt32, kIA32Movsxbl) \
- V(SignExtendWord16ToInt32, kIA32Movsxwl) \
+#define RO_OP_LIST(V) \
+ V(Word32Clz, kIA32Lzcnt) \
+ V(Word32Ctz, kIA32Tzcnt) \
+ V(Word32Popcnt, kIA32Popcnt) \
+ V(ChangeFloat32ToFloat64, kIA32Float32ToFloat64) \
+ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
+ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(TruncateFloat32ToInt32, kIA32Float32ToInt32) \
+ V(ChangeFloat64ToInt32, kIA32Float64ToInt32) \
+ V(TruncateFloat64ToFloat32, kIA32Float64ToFloat32) \
+ V(RoundFloat64ToInt32, kIA32Float64ToInt32) \
+ V(BitcastFloat32ToInt32, kIA32BitcastFI) \
+ V(BitcastInt32ToFloat32, kIA32BitcastIF) \
+ V(Float32Sqrt, kIA32Float32Sqrt) \
+ V(Float64Sqrt, kIA32Float64Sqrt) \
+ V(Float64ExtractLowWord32, kIA32Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kIA32Float64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32, kIA32Movsxbl) \
+ V(SignExtendWord16ToInt32, kIA32Movsxwl) \
V(F64x2Sqrt, kIA32F64x2Sqrt)
-#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)
-
-#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
- V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
- V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)
-
-#define RR_OP_LIST(V) \
- V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
- V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
- V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
- V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
- V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
- V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
- V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
- V(Float32RoundTiesEven, \
- kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
- V(Float64RoundTiesEven, \
- kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
- V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
- V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
- V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
- V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
- V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
- V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
- V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
+#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kIA32Uint32ToFloat64)
+
+#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
+ V(TruncateFloat32ToUint32, kIA32Float32ToUint32) \
+ V(ChangeFloat64ToUint32, kIA32Float64ToUint32) \
+ V(TruncateFloat64ToUint32, kIA32Float64ToUint32)
+
+#define RR_OP_LIST(V) \
+ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+ V(Float32RoundDown, kIA32Float32Round | MiscField::encode(kRoundDown)) \
+ V(Float64RoundDown, kIA32Float64Round | MiscField::encode(kRoundDown)) \
+ V(Float32RoundUp, kIA32Float32Round | MiscField::encode(kRoundUp)) \
+ V(Float64RoundUp, kIA32Float64Round | MiscField::encode(kRoundUp)) \
+ V(Float32RoundTruncate, kIA32Float32Round | MiscField::encode(kRoundToZero)) \
+ V(Float64RoundTruncate, kIA32Float64Round | MiscField::encode(kRoundToZero)) \
+ V(Float32RoundTiesEven, \
+ kIA32Float32Round | MiscField::encode(kRoundToNearest)) \
+ V(Float64RoundTiesEven, \
+ kIA32Float64Round | MiscField::encode(kRoundToNearest)) \
+ V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
+ V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
+ V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
+ V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
+ V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
+ V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
+ V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
#define RRO_FLOAT_OP_LIST(V) \
@@ -1195,6 +1201,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float64Abs, kFloat64Abs) \
V(Float32Neg, kFloat32Neg) \
V(Float64Neg, kFloat64Neg) \
+ V(F32x4Abs, kFloat32Abs) \
+ V(F32x4Neg, kFloat32Neg) \
V(F64x2Abs, kFloat64Abs) \
V(F64x2Neg, kFloat64Neg)
@@ -1347,14 +1355,14 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ Emit(kIA32Uint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
arraysize(temps), temps);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister()};
- Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Mod, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1362,7 +1370,7 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
void InstructionSelector::VisitFloat32Max(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
+ Emit(kIA32Float32Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1370,7 +1378,7 @@ void InstructionSelector::VisitFloat32Max(Node* node) {
void InstructionSelector::VisitFloat64Max(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1378,7 +1386,7 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
void InstructionSelector::VisitFloat32Min(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
+ Emit(kIA32Float32Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1386,7 +1394,7 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
void InstructionSelector::VisitFloat64Min(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1622,7 +1630,7 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
- VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
+ VisitCompare(selector, kIA32Float32Cmp, right, left, cont, false);
}
// Shared routine for multiple float64 compare operations (inputs commuted).
@@ -1630,7 +1638,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
- VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
+ VisitCompare(selector, kIA32Float64Cmp, right, left, cont, false);
}
// Shared routine for multiple word compare operations.
@@ -1965,10 +1973,10 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Float64Matcher mleft(left);
if (mleft.HasResolvedValue() &&
(bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
- Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
+ Emit(kIA32Float64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
- Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
@@ -1976,13 +1984,13 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64SilenceNaN, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
@@ -2247,8 +2255,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16)
#define SIMD_BINOP_LIST(V) \
- V(F32x4Min) \
- V(F32x4Max) \
V(I32x4GtU) \
V(I32x4GeU) \
V(I16x8Ne) \
@@ -2269,6 +2275,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
+ V(F32x4Min) \
+ V(F32x4Max) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
@@ -2339,10 +2347,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_UNOP_LIST(V) \
V(F64x2ConvertLowI32x4S) \
- V(F64x2PromoteLowF32x4) \
V(F32x4DemoteF64x2Zero) \
- V(F32x4Abs) \
- V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4RecipApprox) \
@@ -3169,6 +3174,25 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
VisitRRSimd(this, node, kIA32I64x2Abs, kIA32I64x2Abs);
}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionCode code = kIA32F64x2PromoteLowF32x4;
+ Node* input = node->InputAt(0);
+ LoadTransformMatcher m(input);
+
+ if (m.Is(LoadTransformation::kS128Load64Zero) && CanCover(node, input)) {
+ // Trap handler is not supported on IA32.
+ DCHECK_NE(m.ResolvedValue().kind, MemoryAccessKind::kProtected);
+ // LoadTransforms cannot be eliminated, so they are visited even if
+ // unused. Mark it as defined so that we don't visit it.
+ MarkAsDefined(input);
+ VisitLoad(node, input, code);
+ return;
+ }
+
+ VisitRR(this, node, code);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
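
The new VisitF64x2PromoteLowF32x4 above pairs with the kIA32F64x2PromoteLowF32x4 change in code-generator-ia32.cc: when the input is a S128Load64Zero load-transform covered by this node, the load is marked as defined and folded into the conversion, so a single Cvtps2pd with a memory operand is emitted. A rough sketch of the effect (the instruction shown for the standalone load is illustrative):

    ; before the fold: two instructions
    movq     xmm0, [mem]         ; v128.load64_zero
    cvtps2pd xmm1, xmm0          ; f64x2.promote_low_f32x4
    ; after the fold: one instruction, which is safe because cvtps2pd reads
    ; only the low 64 bits of its source operand
    cvtps2pd xmm1, [mem]
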
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 63cf3ca06f..56d4d960bd 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -92,7 +92,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchBinarySearchSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
- V(ArchAbortCSAAssert) \
+ V(ArchAbortCSADcheck) \
V(ArchDebugBreak) \
V(ArchComment) \
V(ArchThrowTerminator) \
@@ -296,23 +296,58 @@ static_assert(
"All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
-using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
-using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+using MiscField = base::BitField<int, 22, 10>;
+
+// {MiscField} is used for a variety of things, depending on the opcode.
+// TODO(turbofan): There should be an abstraction that ensures safe encoding and
+// decoding. {HasMemoryAccessMode} and its uses are a small step in that
+// direction.
+
// LaneSizeField and AccessModeField are helper types to encode/decode a lane
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
+// TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard
+// decoding (in CodeGenerator and InstructionScheduler). Encoding (in
+// InstructionSelector) is not yet guarded. There are in fact instructions for
+// which InstructionSelector does set a MemoryAccessMode but CodeGenerator
+// doesn't care to consume it (e.g. kArm64LdrDecompressTaggedSigned). This is
+// scary. {HasMemoryAccessMode} does not include these instructions, so they can
+// be easily found by guarding encoding.
+inline bool HasMemoryAccessMode(ArchOpcode opcode) {
+ switch (opcode) {
+#define CASE(Name) \
+ case k##Name: \
+ return true;
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
+#undef CASE
+ default:
+ return false;
+ }
+}
+
+using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
+using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+
// AtomicWidthField overlaps with MiscField and is used for the various Atomic
// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
// architectures are assumed to be 32bit wide.
using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
+
// AtomicMemoryOrderField overlaps with MiscField and is used for the various
// Atomic opcodes. This field is not used on all architectures. It is used on
// architectures where the codegen for kSeqCst and kAcqRel differ only by
// emitting fences.
using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
-using MiscField = base::BitField<int, 22, 10>;
+
+// ParamField and FPParamField overlap with MiscField, as the latter is never
+// used for Call instructions. These 2 fields represent the general purpose
+// and floating point parameter counts of a direct call into C and are given 5
+// bits each, which allow storing a number up to the current maximum parameter
+// count, which is 20 (see kMaxCParameters defined in macro-assembler.h).
+using ParamField = base::BitField<int, 22, 5>;
+using FPParamField = base::BitField<int, 27, 5>;
// This static assertion serves as an early warning if we are about to exhaust
// the available opcode space. If we are about to exhaust it, we should start
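
ParamField and FPParamField replace the single MiscField-encoded parameter count for C calls (see the VisitCall change in instruction-selector.cc further down). A standalone sketch of the packing, with base::BitField re-implemented locally and an illustrative opcode value:

    #include <cstdint>
    // Simplified stand-in for base::BitField<T, shift, size>.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };
    using ParamField = BitField<int, 22, 5>;    // general-purpose arg count
    using FPParamField = BitField<int, 27, 5>;  // floating-point arg count

    constexpr uint32_t kArchCallCFunction = 7;  // illustrative value only
    constexpr uint32_t opcode = kArchCallCFunction |
                                ParamField::encode(3) |
                                FPParamField::encode(2);
    static_assert(ParamField::decode(opcode) == 3, "3 GP parameters");
    static_assert(FPParamField::decode(opcode) == 2, "2 FP parameters");
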
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index bdad838f3e..3d0be78262 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -167,12 +167,16 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
- } else if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
+ } else if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
// Ensure that deopts or traps are not reordered with respect to
// side-effect instructions.
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
+ }
+
+ // Update last deoptimization or trap point.
+ if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
last_deopt_or_trap_ = new_node;
}
@@ -304,7 +308,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
#if V8_ENABLE_WEBASSEMBLY
case kArchTailCallWasm:
#endif // V8_ENABLE_WEBASSEMBLY
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return kHasSideEffect;
case kArchDebugBreak:
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.h b/deps/v8/src/compiler/backend/instruction-scheduler.h
index c22190bd50..d4c08a033d 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.h
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.h
@@ -169,6 +169,12 @@ class InstructionScheduler final : public ZoneObject {
return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
}
+ bool CanTrap(const Instruction* instr) const {
+ return instr->IsTrap() ||
+ (instr->HasMemoryAccessMode() &&
+ instr->memory_access_mode() == kMemoryAccessProtected);
+ }
+
// The scheduler will not move the following instructions before the last
// deopt/trap check:
// * loads (this is conservative)
@@ -184,7 +190,7 @@ class InstructionScheduler final : public ZoneObject {
// trap point we encountered.
bool DependsOnDeoptOrTrap(const Instruction* instr) const {
return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
- instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
+ CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index cd2b83ac3d..beb716abbe 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -1195,9 +1195,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCall ||
- node->opcode() == IrOpcode::kProtectedLoad ||
node->opcode() == IrOpcode::kProtectedStore ||
- node->opcode() == IrOpcode::kLoadTransform ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
node->opcode() == IrOpcode::k##Opcode ||
MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
@@ -1454,8 +1452,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
- case IrOpcode::kAbortCSAAssert:
- VisitAbortCSAAssert(node);
+ case IrOpcode::kAbortCSADcheck:
+ VisitAbortCSADcheck(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
@@ -2786,7 +2784,7 @@ namespace {
LinkageLocation ExceptionLocation() {
return LinkageLocation::ForRegister(kReturnRegister0.code(),
- MachineType::IntPtr());
+ MachineType::TaggedPointer());
}
constexpr InstructionCode EncodeCallDescriptorFlags(
@@ -2916,16 +2914,20 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
InstructionCode opcode;
switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress: {
- int misc_field = static_cast<int>(call_descriptor->ParameterCount());
+ int gp_param_count =
+ static_cast<int>(call_descriptor->GPParameterCount());
+ int fp_param_count =
+ static_cast<int>(call_descriptor->FPParameterCount());
#if ABI_USES_FUNCTION_DESCRIPTORS
- // Highest misc_field bit is used on AIX to indicate if a CFunction call
- // has function descriptor or not.
- STATIC_ASSERT(MiscField::kSize == kHasFunctionDescriptorBitShift + 1);
+ // Highest fp_param_count bit is used on AIX to indicate if a CFunction
+ // call has function descriptor or not.
+ STATIC_ASSERT(FPParamField::kSize == kHasFunctionDescriptorBitShift + 1);
if (!call_descriptor->NoFunctionDescriptor()) {
- misc_field |= 1 << kHasFunctionDescriptorBitShift;
+ fp_param_count |= 1 << kHasFunctionDescriptorBitShift;
}
#endif
- opcode = kArchCallCFunction | MiscField::encode(misc_field);
+ opcode = kArchCallCFunction | ParamField::encode(gp_param_count) |
+ FPParamField::encode(fp_param_count);
break;
}
case CallDescriptor::kCallCodeObject:
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 0da8e054ae..a5c008bad5 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -7,7 +7,9 @@
#include <cstddef>
#include <iomanip>
+#include "src/codegen/aligned-slot-allocator.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/source-position.h"
#include "src/compiler/common-operator.h"
@@ -77,10 +79,15 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
- if (kSimpleFPAliasing || !this->IsFPLocationOperand() ||
- !other.IsFPLocationOperand())
+ const bool kComplexFPAliasing = !kSimpleFPAliasing &&
+ this->IsFPLocationOperand() &&
+ other.IsFPLocationOperand();
+ const bool kComplexS128SlotAliasing =
+ (this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
+ (other.IsSimd128StackSlot() && this->IsAnyStackSlot());
+ if (!kComplexFPAliasing && !kComplexS128SlotAliasing) {
return EqualsCanonicalized(other);
- // Aliasing is complex and both operands are fp locations.
+ }
const LocationOperand& loc = *LocationOperand::cast(this);
const LocationOperand& other_loc = LocationOperand::cast(other);
LocationOperand::LocationKind kind = loc.location_kind();
@@ -88,22 +95,29 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
if (kind != other_kind) return false;
MachineRepresentation rep = loc.representation();
MachineRepresentation other_rep = other_loc.representation();
- if (rep == other_rep) return EqualsCanonicalized(other);
- if (kind == LocationOperand::REGISTER) {
- // FP register-register interference.
- return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
- other_loc.register_code());
+
+ if (kComplexFPAliasing && !kComplexS128SlotAliasing) {
+ if (rep == other_rep) return EqualsCanonicalized(other);
+ if (kind == LocationOperand::REGISTER) {
+ // FP register-register interference.
+ return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
+ other_loc.register_code());
+ }
}
- // FP slot-slot interference. Slots of different FP reps can alias because
- // the gap resolver may break a move into 2 or 4 equivalent smaller moves.
+
+ // Complex multi-slot operand interference:
+ // - slots of different FP reps can alias because the gap resolver may break a
+ // move into 2 or 4 equivalent smaller moves,
+ // - stack layout can be rearranged for tail calls
DCHECK_EQ(LocationOperand::STACK_SLOT, kind);
int index_hi = loc.index();
int index_lo =
- index_hi - (1 << ElementSizeLog2Of(rep)) / kSystemPointerSize + 1;
+ index_hi -
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(rep)) + 1;
int other_index_hi = other_loc.index();
int other_index_lo =
other_index_hi -
- (1 << ElementSizeLog2Of(other_rep)) / kSystemPointerSize + 1;
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(other_rep)) + 1;
return other_index_hi >= index_lo && index_hi >= other_index_lo;
}
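
With Simd128 stack slots now treated as multi-slot operands, the interval test also catches a 16-byte slot overlapping a narrower slot of a different representation. A worked example, assuming 4-byte stack slots as on 32-bit targets: a Simd128 operand with index_hi = 7 spans slots [4, 7] (NumSlotsForWidth(16) == 4, so index_lo = 7 - 4 + 1 = 4), while a Float64 operand with index_hi = 5 spans [4, 5]; since 5 >= 4 and 7 >= 4, the intervals overlap and the operands are reported as interfering even though their representations differ.
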
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 8698ed8a98..7372a5160d 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -882,6 +882,13 @@ class V8_EXPORT_PRIVATE Instruction final {
return FlagsConditionField::decode(opcode());
}
int misc() const { return MiscField::decode(opcode()); }
+ bool HasMemoryAccessMode() const {
+ return compiler::HasMemoryAccessMode(arch_opcode());
+ }
+ MemoryAccessMode memory_access_mode() const {
+ DCHECK(HasMemoryAccessMode());
+ return AccessModeField::decode(opcode());
+ }
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
index 0397a36145..33226126cd 100644
--- a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -748,13 +748,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
index f31818cac2..e38d37451d 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
+++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
@@ -11,365 +11,370 @@ namespace compiler {
// LOONG64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Loong64Add_d) \
- V(Loong64Add_w) \
- V(Loong64AddOvf_d) \
- V(Loong64Sub_d) \
- V(Loong64Sub_w) \
- V(Loong64SubOvf_d) \
- V(Loong64Mul_d) \
- V(Loong64MulOvf_w) \
- V(Loong64Mulh_d) \
- V(Loong64Mulh_w) \
- V(Loong64Mulh_wu) \
- V(Loong64Mul_w) \
- V(Loong64Div_d) \
- V(Loong64Div_w) \
- V(Loong64Div_du) \
- V(Loong64Div_wu) \
- V(Loong64Mod_d) \
- V(Loong64Mod_w) \
- V(Loong64Mod_du) \
- V(Loong64Mod_wu) \
- V(Loong64And) \
- V(Loong64And32) \
- V(Loong64Or) \
- V(Loong64Or32) \
- V(Loong64Nor) \
- V(Loong64Nor32) \
- V(Loong64Xor) \
- V(Loong64Xor32) \
- V(Loong64Alsl_d) \
- V(Loong64Alsl_w) \
- V(Loong64Sll_d) \
- V(Loong64Sll_w) \
- V(Loong64Srl_d) \
- V(Loong64Srl_w) \
- V(Loong64Sra_d) \
- V(Loong64Sra_w) \
- V(Loong64Rotr_d) \
- V(Loong64Rotr_w) \
- V(Loong64Bstrpick_d) \
- V(Loong64Bstrpick_w) \
- V(Loong64Bstrins_d) \
- V(Loong64Bstrins_w) \
- V(Loong64ByteSwap64) \
- V(Loong64ByteSwap32) \
- V(Loong64Clz_d) \
- V(Loong64Clz_w) \
- V(Loong64Mov) \
- V(Loong64Tst) \
- V(Loong64Cmp) \
- V(Loong64Float32Cmp) \
- V(Loong64Float32Add) \
- V(Loong64Float32Sub) \
- V(Loong64Float32Mul) \
- V(Loong64Float32Div) \
- V(Loong64Float32Abs) \
- V(Loong64Float32Neg) \
- V(Loong64Float32Sqrt) \
- V(Loong64Float32Max) \
- V(Loong64Float32Min) \
- V(Loong64Float32ToFloat64) \
- V(Loong64Float32RoundDown) \
- V(Loong64Float32RoundUp) \
- V(Loong64Float32RoundTruncate) \
- V(Loong64Float32RoundTiesEven) \
- V(Loong64Float32ToInt32) \
- V(Loong64Float32ToInt64) \
- V(Loong64Float32ToUint32) \
- V(Loong64Float32ToUint64) \
- V(Loong64Float64Cmp) \
- V(Loong64Float64Add) \
- V(Loong64Float64Sub) \
- V(Loong64Float64Mul) \
- V(Loong64Float64Div) \
- V(Loong64Float64Mod) \
- V(Loong64Float64Abs) \
- V(Loong64Float64Neg) \
- V(Loong64Float64Sqrt) \
- V(Loong64Float64Max) \
- V(Loong64Float64Min) \
- V(Loong64Float64ToFloat32) \
- V(Loong64Float64RoundDown) \
- V(Loong64Float64RoundUp) \
- V(Loong64Float64RoundTruncate) \
- V(Loong64Float64RoundTiesEven) \
- V(Loong64Float64ToInt32) \
- V(Loong64Float64ToInt64) \
- V(Loong64Float64ToUint32) \
- V(Loong64Float64ToUint64) \
- V(Loong64Int32ToFloat32) \
- V(Loong64Int32ToFloat64) \
- V(Loong64Int64ToFloat32) \
- V(Loong64Int64ToFloat64) \
- V(Loong64Uint32ToFloat32) \
- V(Loong64Uint32ToFloat64) \
- V(Loong64Uint64ToFloat32) \
- V(Loong64Uint64ToFloat64) \
- V(Loong64Float64ExtractLowWord32) \
- V(Loong64Float64ExtractHighWord32) \
- V(Loong64Float64InsertLowWord32) \
- V(Loong64Float64InsertHighWord32) \
- V(Loong64BitcastDL) \
- V(Loong64BitcastLD) \
- V(Loong64Float64SilenceNaN) \
- V(Loong64Ld_b) \
- V(Loong64Ld_bu) \
- V(Loong64St_b) \
- V(Loong64Ld_h) \
- V(Loong64Ld_hu) \
- V(Loong64St_h) \
- V(Loong64Ld_w) \
- V(Loong64Ld_wu) \
- V(Loong64St_w) \
- V(Loong64Ld_d) \
- V(Loong64St_d) \
- V(Loong64Fld_s) \
- V(Loong64Fst_s) \
- V(Loong64Fld_d) \
- V(Loong64Fst_d) \
- V(Loong64Push) \
- V(Loong64Peek) \
- V(Loong64Poke) \
- V(Loong64StackClaim) \
- V(Loong64Ext_w_b) \
- V(Loong64Ext_w_h) \
- V(Loong64Dbar) \
- V(Loong64S128Const) \
- V(Loong64S128Zero) \
- V(Loong64S128AllOnes) \
- V(Loong64I32x4Splat) \
- V(Loong64I32x4ExtractLane) \
- V(Loong64I32x4ReplaceLane) \
- V(Loong64I32x4Add) \
- V(Loong64I32x4Sub) \
- V(Loong64F64x2Abs) \
- V(Loong64F64x2Neg) \
- V(Loong64F32x4Splat) \
- V(Loong64F32x4ExtractLane) \
- V(Loong64F32x4ReplaceLane) \
- V(Loong64F32x4SConvertI32x4) \
- V(Loong64F32x4UConvertI32x4) \
- V(Loong64I32x4Mul) \
- V(Loong64I32x4MaxS) \
- V(Loong64I32x4MinS) \
- V(Loong64I32x4Eq) \
- V(Loong64I32x4Ne) \
- V(Loong64I32x4Shl) \
- V(Loong64I32x4ShrS) \
- V(Loong64I32x4ShrU) \
- V(Loong64I32x4MaxU) \
- V(Loong64I32x4MinU) \
- V(Loong64F64x2Sqrt) \
- V(Loong64F64x2Add) \
- V(Loong64F64x2Sub) \
- V(Loong64F64x2Mul) \
- V(Loong64F64x2Div) \
- V(Loong64F64x2Min) \
- V(Loong64F64x2Max) \
- V(Loong64F64x2Eq) \
- V(Loong64F64x2Ne) \
- V(Loong64F64x2Lt) \
- V(Loong64F64x2Le) \
- V(Loong64F64x2Splat) \
- V(Loong64F64x2ExtractLane) \
- V(Loong64F64x2ReplaceLane) \
- V(Loong64F64x2Pmin) \
- V(Loong64F64x2Pmax) \
- V(Loong64F64x2Ceil) \
- V(Loong64F64x2Floor) \
- V(Loong64F64x2Trunc) \
- V(Loong64F64x2NearestInt) \
- V(Loong64F64x2ConvertLowI32x4S) \
- V(Loong64F64x2ConvertLowI32x4U) \
- V(Loong64F64x2PromoteLowF32x4) \
- V(Loong64I64x2Splat) \
- V(Loong64I64x2ExtractLane) \
- V(Loong64I64x2ReplaceLane) \
- V(Loong64I64x2Add) \
- V(Loong64I64x2Sub) \
- V(Loong64I64x2Mul) \
- V(Loong64I64x2Neg) \
- V(Loong64I64x2Shl) \
- V(Loong64I64x2ShrS) \
- V(Loong64I64x2ShrU) \
- V(Loong64I64x2BitMask) \
- V(Loong64I64x2Eq) \
- V(Loong64I64x2Ne) \
- V(Loong64I64x2GtS) \
- V(Loong64I64x2GeS) \
- V(Loong64I64x2Abs) \
- V(Loong64I64x2SConvertI32x4Low) \
- V(Loong64I64x2SConvertI32x4High) \
- V(Loong64I64x2UConvertI32x4Low) \
- V(Loong64I64x2UConvertI32x4High) \
- V(Loong64ExtMulLow) \
- V(Loong64ExtMulHigh) \
- V(Loong64ExtAddPairwise) \
- V(Loong64F32x4Abs) \
- V(Loong64F32x4Neg) \
- V(Loong64F32x4Sqrt) \
- V(Loong64F32x4RecipApprox) \
- V(Loong64F32x4RecipSqrtApprox) \
- V(Loong64F32x4Add) \
- V(Loong64F32x4Sub) \
- V(Loong64F32x4Mul) \
- V(Loong64F32x4Div) \
- V(Loong64F32x4Max) \
- V(Loong64F32x4Min) \
- V(Loong64F32x4Eq) \
- V(Loong64F32x4Ne) \
- V(Loong64F32x4Lt) \
- V(Loong64F32x4Le) \
- V(Loong64F32x4Pmin) \
- V(Loong64F32x4Pmax) \
- V(Loong64F32x4Ceil) \
- V(Loong64F32x4Floor) \
- V(Loong64F32x4Trunc) \
- V(Loong64F32x4NearestInt) \
- V(Loong64F32x4DemoteF64x2Zero) \
- V(Loong64I32x4SConvertF32x4) \
- V(Loong64I32x4UConvertF32x4) \
- V(Loong64I32x4Neg) \
- V(Loong64I32x4GtS) \
- V(Loong64I32x4GeS) \
- V(Loong64I32x4GtU) \
- V(Loong64I32x4GeU) \
- V(Loong64I32x4Abs) \
- V(Loong64I32x4BitMask) \
- V(Loong64I32x4DotI16x8S) \
- V(Loong64I32x4TruncSatF64x2SZero) \
- V(Loong64I32x4TruncSatF64x2UZero) \
- V(Loong64I16x8Splat) \
- V(Loong64I16x8ExtractLaneU) \
- V(Loong64I16x8ExtractLaneS) \
- V(Loong64I16x8ReplaceLane) \
- V(Loong64I16x8Neg) \
- V(Loong64I16x8Shl) \
- V(Loong64I16x8ShrS) \
- V(Loong64I16x8ShrU) \
- V(Loong64I16x8Add) \
- V(Loong64I16x8AddSatS) \
- V(Loong64I16x8Sub) \
- V(Loong64I16x8SubSatS) \
- V(Loong64I16x8Mul) \
- V(Loong64I16x8MaxS) \
- V(Loong64I16x8MinS) \
- V(Loong64I16x8Eq) \
- V(Loong64I16x8Ne) \
- V(Loong64I16x8GtS) \
- V(Loong64I16x8GeS) \
- V(Loong64I16x8AddSatU) \
- V(Loong64I16x8SubSatU) \
- V(Loong64I16x8MaxU) \
- V(Loong64I16x8MinU) \
- V(Loong64I16x8GtU) \
- V(Loong64I16x8GeU) \
- V(Loong64I16x8RoundingAverageU) \
- V(Loong64I16x8Abs) \
- V(Loong64I16x8BitMask) \
- V(Loong64I16x8Q15MulRSatS) \
- V(Loong64I8x16Splat) \
- V(Loong64I8x16ExtractLaneU) \
- V(Loong64I8x16ExtractLaneS) \
- V(Loong64I8x16ReplaceLane) \
- V(Loong64I8x16Neg) \
- V(Loong64I8x16Shl) \
- V(Loong64I8x16ShrS) \
- V(Loong64I8x16Add) \
- V(Loong64I8x16AddSatS) \
- V(Loong64I8x16Sub) \
- V(Loong64I8x16SubSatS) \
- V(Loong64I8x16MaxS) \
- V(Loong64I8x16MinS) \
- V(Loong64I8x16Eq) \
- V(Loong64I8x16Ne) \
- V(Loong64I8x16GtS) \
- V(Loong64I8x16GeS) \
- V(Loong64I8x16ShrU) \
- V(Loong64I8x16AddSatU) \
- V(Loong64I8x16SubSatU) \
- V(Loong64I8x16MaxU) \
- V(Loong64I8x16MinU) \
- V(Loong64I8x16GtU) \
- V(Loong64I8x16GeU) \
- V(Loong64I8x16RoundingAverageU) \
- V(Loong64I8x16Abs) \
- V(Loong64I8x16Popcnt) \
- V(Loong64I8x16BitMask) \
- V(Loong64S128And) \
- V(Loong64S128Or) \
- V(Loong64S128Xor) \
- V(Loong64S128Not) \
- V(Loong64S128Select) \
- V(Loong64S128AndNot) \
- V(Loong64I64x2AllTrue) \
- V(Loong64I32x4AllTrue) \
- V(Loong64I16x8AllTrue) \
- V(Loong64I8x16AllTrue) \
- V(Loong64V128AnyTrue) \
- V(Loong64S32x4InterleaveRight) \
- V(Loong64S32x4InterleaveLeft) \
- V(Loong64S32x4PackEven) \
- V(Loong64S32x4PackOdd) \
- V(Loong64S32x4InterleaveEven) \
- V(Loong64S32x4InterleaveOdd) \
- V(Loong64S32x4Shuffle) \
- V(Loong64S16x8InterleaveRight) \
- V(Loong64S16x8InterleaveLeft) \
- V(Loong64S16x8PackEven) \
- V(Loong64S16x8PackOdd) \
- V(Loong64S16x8InterleaveEven) \
- V(Loong64S16x8InterleaveOdd) \
- V(Loong64S16x4Reverse) \
- V(Loong64S16x2Reverse) \
- V(Loong64S8x16InterleaveRight) \
- V(Loong64S8x16InterleaveLeft) \
- V(Loong64S8x16PackEven) \
- V(Loong64S8x16PackOdd) \
- V(Loong64S8x16InterleaveEven) \
- V(Loong64S8x16InterleaveOdd) \
- V(Loong64I8x16Shuffle) \
- V(Loong64I8x16Swizzle) \
- V(Loong64S8x16Concat) \
- V(Loong64S8x8Reverse) \
- V(Loong64S8x4Reverse) \
- V(Loong64S8x2Reverse) \
- V(Loong64S128LoadSplat) \
- V(Loong64S128Load8x8S) \
- V(Loong64S128Load8x8U) \
- V(Loong64S128Load16x4S) \
- V(Loong64S128Load16x4U) \
- V(Loong64S128Load32x2S) \
- V(Loong64S128Load32x2U) \
- V(Loong64S128Load32Zero) \
- V(Loong64S128Load64Zero) \
- V(Loong64LoadLane) \
- V(Loong64StoreLane) \
- V(Loong64I32x4SConvertI16x8Low) \
- V(Loong64I32x4SConvertI16x8High) \
- V(Loong64I32x4UConvertI16x8Low) \
- V(Loong64I32x4UConvertI16x8High) \
- V(Loong64I16x8SConvertI8x16Low) \
- V(Loong64I16x8SConvertI8x16High) \
- V(Loong64I16x8SConvertI32x4) \
- V(Loong64I16x8UConvertI32x4) \
- V(Loong64I16x8UConvertI8x16Low) \
- V(Loong64I16x8UConvertI8x16High) \
- V(Loong64I8x16SConvertI16x8) \
- V(Loong64I8x16UConvertI16x8) \
- V(Loong64StoreCompressTagged) \
- V(Loong64Word64AtomicLoadUint32) \
- V(Loong64Word64AtomicLoadUint64) \
- V(Loong64Word64AtomicStoreWord64) \
- V(Loong64Word64AtomicAddUint64) \
- V(Loong64Word64AtomicSubUint64) \
- V(Loong64Word64AtomicAndUint64) \
- V(Loong64Word64AtomicOrUint64) \
- V(Loong64Word64AtomicXorUint64) \
- V(Loong64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Loong64Add_d) \
+ V(Loong64Add_w) \
+ V(Loong64AddOvf_d) \
+ V(Loong64Sub_d) \
+ V(Loong64Sub_w) \
+ V(Loong64SubOvf_d) \
+ V(Loong64Mul_d) \
+ V(Loong64MulOvf_w) \
+ V(Loong64Mulh_d) \
+ V(Loong64Mulh_w) \
+ V(Loong64Mulh_wu) \
+ V(Loong64Mul_w) \
+ V(Loong64Div_d) \
+ V(Loong64Div_w) \
+ V(Loong64Div_du) \
+ V(Loong64Div_wu) \
+ V(Loong64Mod_d) \
+ V(Loong64Mod_w) \
+ V(Loong64Mod_du) \
+ V(Loong64Mod_wu) \
+ V(Loong64And) \
+ V(Loong64And32) \
+ V(Loong64Or) \
+ V(Loong64Or32) \
+ V(Loong64Nor) \
+ V(Loong64Nor32) \
+ V(Loong64Xor) \
+ V(Loong64Xor32) \
+ V(Loong64Alsl_d) \
+ V(Loong64Alsl_w) \
+ V(Loong64Sll_d) \
+ V(Loong64Sll_w) \
+ V(Loong64Srl_d) \
+ V(Loong64Srl_w) \
+ V(Loong64Sra_d) \
+ V(Loong64Sra_w) \
+ V(Loong64Rotr_d) \
+ V(Loong64Rotr_w) \
+ V(Loong64Bstrpick_d) \
+ V(Loong64Bstrpick_w) \
+ V(Loong64Bstrins_d) \
+ V(Loong64Bstrins_w) \
+ V(Loong64ByteSwap64) \
+ V(Loong64ByteSwap32) \
+ V(Loong64Clz_d) \
+ V(Loong64Clz_w) \
+ V(Loong64Mov) \
+ V(Loong64Tst) \
+ V(Loong64Cmp) \
+ V(Loong64Float32Cmp) \
+ V(Loong64Float32Add) \
+ V(Loong64Float32Sub) \
+ V(Loong64Float32Mul) \
+ V(Loong64Float32Div) \
+ V(Loong64Float32Abs) \
+ V(Loong64Float32Neg) \
+ V(Loong64Float32Sqrt) \
+ V(Loong64Float32Max) \
+ V(Loong64Float32Min) \
+ V(Loong64Float32ToFloat64) \
+ V(Loong64Float32RoundDown) \
+ V(Loong64Float32RoundUp) \
+ V(Loong64Float32RoundTruncate) \
+ V(Loong64Float32RoundTiesEven) \
+ V(Loong64Float32ToInt32) \
+ V(Loong64Float32ToInt64) \
+ V(Loong64Float32ToUint32) \
+ V(Loong64Float32ToUint64) \
+ V(Loong64Float64Cmp) \
+ V(Loong64Float64Add) \
+ V(Loong64Float64Sub) \
+ V(Loong64Float64Mul) \
+ V(Loong64Float64Div) \
+ V(Loong64Float64Mod) \
+ V(Loong64Float64Abs) \
+ V(Loong64Float64Neg) \
+ V(Loong64Float64Sqrt) \
+ V(Loong64Float64Max) \
+ V(Loong64Float64Min) \
+ V(Loong64Float64ToFloat32) \
+ V(Loong64Float64RoundDown) \
+ V(Loong64Float64RoundUp) \
+ V(Loong64Float64RoundTruncate) \
+ V(Loong64Float64RoundTiesEven) \
+ V(Loong64Float64ToInt32) \
+ V(Loong64Float64ToInt64) \
+ V(Loong64Float64ToUint32) \
+ V(Loong64Float64ToUint64) \
+ V(Loong64Int32ToFloat32) \
+ V(Loong64Int32ToFloat64) \
+ V(Loong64Int64ToFloat32) \
+ V(Loong64Int64ToFloat64) \
+ V(Loong64Uint32ToFloat32) \
+ V(Loong64Uint32ToFloat64) \
+ V(Loong64Uint64ToFloat32) \
+ V(Loong64Uint64ToFloat64) \
+ V(Loong64Float64ExtractLowWord32) \
+ V(Loong64Float64ExtractHighWord32) \
+ V(Loong64Float64InsertLowWord32) \
+ V(Loong64Float64InsertHighWord32) \
+ V(Loong64BitcastDL) \
+ V(Loong64BitcastLD) \
+ V(Loong64Float64SilenceNaN) \
+ V(Loong64Ld_b) \
+ V(Loong64Ld_bu) \
+ V(Loong64St_b) \
+ V(Loong64Ld_h) \
+ V(Loong64Ld_hu) \
+ V(Loong64St_h) \
+ V(Loong64Ld_w) \
+ V(Loong64Ld_wu) \
+ V(Loong64St_w) \
+ V(Loong64Ld_d) \
+ V(Loong64St_d) \
+ V(Loong64Fld_s) \
+ V(Loong64Fst_s) \
+ V(Loong64Fld_d) \
+ V(Loong64Fst_d) \
+ V(Loong64Push) \
+ V(Loong64Peek) \
+ V(Loong64Poke) \
+ V(Loong64StackClaim) \
+ V(Loong64Ext_w_b) \
+ V(Loong64Ext_w_h) \
+ V(Loong64Dbar) \
+ V(Loong64S128Const) \
+ V(Loong64S128Zero) \
+ V(Loong64S128AllOnes) \
+ V(Loong64I32x4Splat) \
+ V(Loong64I32x4ExtractLane) \
+ V(Loong64I32x4ReplaceLane) \
+ V(Loong64I32x4Add) \
+ V(Loong64I32x4Sub) \
+ V(Loong64F64x2Abs) \
+ V(Loong64F64x2Neg) \
+ V(Loong64F32x4Splat) \
+ V(Loong64F32x4ExtractLane) \
+ V(Loong64F32x4ReplaceLane) \
+ V(Loong64F32x4SConvertI32x4) \
+ V(Loong64F32x4UConvertI32x4) \
+ V(Loong64I32x4Mul) \
+ V(Loong64I32x4MaxS) \
+ V(Loong64I32x4MinS) \
+ V(Loong64I32x4Eq) \
+ V(Loong64I32x4Ne) \
+ V(Loong64I32x4Shl) \
+ V(Loong64I32x4ShrS) \
+ V(Loong64I32x4ShrU) \
+ V(Loong64I32x4MaxU) \
+ V(Loong64I32x4MinU) \
+ V(Loong64F64x2Sqrt) \
+ V(Loong64F64x2Add) \
+ V(Loong64F64x2Sub) \
+ V(Loong64F64x2Mul) \
+ V(Loong64F64x2Div) \
+ V(Loong64F64x2Min) \
+ V(Loong64F64x2Max) \
+ V(Loong64F64x2Eq) \
+ V(Loong64F64x2Ne) \
+ V(Loong64F64x2Lt) \
+ V(Loong64F64x2Le) \
+ V(Loong64F64x2Splat) \
+ V(Loong64F64x2ExtractLane) \
+ V(Loong64F64x2ReplaceLane) \
+ V(Loong64F64x2Pmin) \
+ V(Loong64F64x2Pmax) \
+ V(Loong64F64x2Ceil) \
+ V(Loong64F64x2Floor) \
+ V(Loong64F64x2Trunc) \
+ V(Loong64F64x2NearestInt) \
+ V(Loong64F64x2ConvertLowI32x4S) \
+ V(Loong64F64x2ConvertLowI32x4U) \
+ V(Loong64F64x2PromoteLowF32x4) \
+ V(Loong64I64x2Splat) \
+ V(Loong64I64x2ExtractLane) \
+ V(Loong64I64x2ReplaceLane) \
+ V(Loong64I64x2Add) \
+ V(Loong64I64x2Sub) \
+ V(Loong64I64x2Mul) \
+ V(Loong64I64x2Neg) \
+ V(Loong64I64x2Shl) \
+ V(Loong64I64x2ShrS) \
+ V(Loong64I64x2ShrU) \
+ V(Loong64I64x2BitMask) \
+ V(Loong64I64x2Eq) \
+ V(Loong64I64x2Ne) \
+ V(Loong64I64x2GtS) \
+ V(Loong64I64x2GeS) \
+ V(Loong64I64x2Abs) \
+ V(Loong64I64x2SConvertI32x4Low) \
+ V(Loong64I64x2SConvertI32x4High) \
+ V(Loong64I64x2UConvertI32x4Low) \
+ V(Loong64I64x2UConvertI32x4High) \
+ V(Loong64ExtMulLow) \
+ V(Loong64ExtMulHigh) \
+ V(Loong64ExtAddPairwise) \
+ V(Loong64F32x4Abs) \
+ V(Loong64F32x4Neg) \
+ V(Loong64F32x4Sqrt) \
+ V(Loong64F32x4RecipApprox) \
+ V(Loong64F32x4RecipSqrtApprox) \
+ V(Loong64F32x4Add) \
+ V(Loong64F32x4Sub) \
+ V(Loong64F32x4Mul) \
+ V(Loong64F32x4Div) \
+ V(Loong64F32x4Max) \
+ V(Loong64F32x4Min) \
+ V(Loong64F32x4Eq) \
+ V(Loong64F32x4Ne) \
+ V(Loong64F32x4Lt) \
+ V(Loong64F32x4Le) \
+ V(Loong64F32x4Pmin) \
+ V(Loong64F32x4Pmax) \
+ V(Loong64F32x4Ceil) \
+ V(Loong64F32x4Floor) \
+ V(Loong64F32x4Trunc) \
+ V(Loong64F32x4NearestInt) \
+ V(Loong64F32x4DemoteF64x2Zero) \
+ V(Loong64I32x4SConvertF32x4) \
+ V(Loong64I32x4UConvertF32x4) \
+ V(Loong64I32x4Neg) \
+ V(Loong64I32x4GtS) \
+ V(Loong64I32x4GeS) \
+ V(Loong64I32x4GtU) \
+ V(Loong64I32x4GeU) \
+ V(Loong64I32x4Abs) \
+ V(Loong64I32x4BitMask) \
+ V(Loong64I32x4DotI16x8S) \
+ V(Loong64I32x4TruncSatF64x2SZero) \
+ V(Loong64I32x4TruncSatF64x2UZero) \
+ V(Loong64I16x8Splat) \
+ V(Loong64I16x8ExtractLaneU) \
+ V(Loong64I16x8ExtractLaneS) \
+ V(Loong64I16x8ReplaceLane) \
+ V(Loong64I16x8Neg) \
+ V(Loong64I16x8Shl) \
+ V(Loong64I16x8ShrS) \
+ V(Loong64I16x8ShrU) \
+ V(Loong64I16x8Add) \
+ V(Loong64I16x8AddSatS) \
+ V(Loong64I16x8Sub) \
+ V(Loong64I16x8SubSatS) \
+ V(Loong64I16x8Mul) \
+ V(Loong64I16x8MaxS) \
+ V(Loong64I16x8MinS) \
+ V(Loong64I16x8Eq) \
+ V(Loong64I16x8Ne) \
+ V(Loong64I16x8GtS) \
+ V(Loong64I16x8GeS) \
+ V(Loong64I16x8AddSatU) \
+ V(Loong64I16x8SubSatU) \
+ V(Loong64I16x8MaxU) \
+ V(Loong64I16x8MinU) \
+ V(Loong64I16x8GtU) \
+ V(Loong64I16x8GeU) \
+ V(Loong64I16x8RoundingAverageU) \
+ V(Loong64I16x8Abs) \
+ V(Loong64I16x8BitMask) \
+ V(Loong64I16x8Q15MulRSatS) \
+ V(Loong64I8x16Splat) \
+ V(Loong64I8x16ExtractLaneU) \
+ V(Loong64I8x16ExtractLaneS) \
+ V(Loong64I8x16ReplaceLane) \
+ V(Loong64I8x16Neg) \
+ V(Loong64I8x16Shl) \
+ V(Loong64I8x16ShrS) \
+ V(Loong64I8x16Add) \
+ V(Loong64I8x16AddSatS) \
+ V(Loong64I8x16Sub) \
+ V(Loong64I8x16SubSatS) \
+ V(Loong64I8x16MaxS) \
+ V(Loong64I8x16MinS) \
+ V(Loong64I8x16Eq) \
+ V(Loong64I8x16Ne) \
+ V(Loong64I8x16GtS) \
+ V(Loong64I8x16GeS) \
+ V(Loong64I8x16ShrU) \
+ V(Loong64I8x16AddSatU) \
+ V(Loong64I8x16SubSatU) \
+ V(Loong64I8x16MaxU) \
+ V(Loong64I8x16MinU) \
+ V(Loong64I8x16GtU) \
+ V(Loong64I8x16GeU) \
+ V(Loong64I8x16RoundingAverageU) \
+ V(Loong64I8x16Abs) \
+ V(Loong64I8x16Popcnt) \
+ V(Loong64I8x16BitMask) \
+ V(Loong64S128And) \
+ V(Loong64S128Or) \
+ V(Loong64S128Xor) \
+ V(Loong64S128Not) \
+ V(Loong64S128Select) \
+ V(Loong64S128AndNot) \
+ V(Loong64I64x2AllTrue) \
+ V(Loong64I32x4AllTrue) \
+ V(Loong64I16x8AllTrue) \
+ V(Loong64I8x16AllTrue) \
+ V(Loong64V128AnyTrue) \
+ V(Loong64S32x4InterleaveRight) \
+ V(Loong64S32x4InterleaveLeft) \
+ V(Loong64S32x4PackEven) \
+ V(Loong64S32x4PackOdd) \
+ V(Loong64S32x4InterleaveEven) \
+ V(Loong64S32x4InterleaveOdd) \
+ V(Loong64S32x4Shuffle) \
+ V(Loong64S16x8InterleaveRight) \
+ V(Loong64S16x8InterleaveLeft) \
+ V(Loong64S16x8PackEven) \
+ V(Loong64S16x8PackOdd) \
+ V(Loong64S16x8InterleaveEven) \
+ V(Loong64S16x8InterleaveOdd) \
+ V(Loong64S16x4Reverse) \
+ V(Loong64S16x2Reverse) \
+ V(Loong64S8x16InterleaveRight) \
+ V(Loong64S8x16InterleaveLeft) \
+ V(Loong64S8x16PackEven) \
+ V(Loong64S8x16PackOdd) \
+ V(Loong64S8x16InterleaveEven) \
+ V(Loong64S8x16InterleaveOdd) \
+ V(Loong64I8x16Shuffle) \
+ V(Loong64I8x16Swizzle) \
+ V(Loong64S8x16Concat) \
+ V(Loong64S8x8Reverse) \
+ V(Loong64S8x4Reverse) \
+ V(Loong64S8x2Reverse) \
+ V(Loong64S128LoadSplat) \
+ V(Loong64S128Load8x8S) \
+ V(Loong64S128Load8x8U) \
+ V(Loong64S128Load16x4S) \
+ V(Loong64S128Load16x4U) \
+ V(Loong64S128Load32x2S) \
+ V(Loong64S128Load32x2U) \
+ V(Loong64S128Load32Zero) \
+ V(Loong64S128Load64Zero) \
+ V(Loong64LoadLane) \
+ V(Loong64StoreLane) \
+ V(Loong64I32x4SConvertI16x8Low) \
+ V(Loong64I32x4SConvertI16x8High) \
+ V(Loong64I32x4UConvertI16x8Low) \
+ V(Loong64I32x4UConvertI16x8High) \
+ V(Loong64I16x8SConvertI8x16Low) \
+ V(Loong64I16x8SConvertI8x16High) \
+ V(Loong64I16x8SConvertI32x4) \
+ V(Loong64I16x8UConvertI32x4) \
+ V(Loong64I16x8UConvertI8x16Low) \
+ V(Loong64I16x8UConvertI8x16High) \
+ V(Loong64I8x16SConvertI16x8) \
+ V(Loong64I8x16UConvertI16x8) \
+ V(Loong64StoreCompressTagged) \
+ V(Loong64Word64AtomicLoadUint32) \
+ V(Loong64Word64AtomicLoadUint64) \
+ V(Loong64Word64AtomicStoreWord64) \
+ V(Loong64Word64AtomicAddUint64) \
+ V(Loong64Word64AtomicSubUint64) \
+ V(Loong64Word64AtomicAndUint64) \
+ V(Loong64Word64AtomicOrUint64) \
+ V(Loong64Word64AtomicXorUint64) \
+ V(Loong64Word64AtomicExchangeUint64) \
V(Loong64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
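For contrast, a purely illustrative backend (hypothetical, not from this patch) whose loads and stores do carry an access mode would enumerate those opcodes in the new list rather than in the main one; the main list then picks them up through the include at its top:

  // Hypothetical opcode names, shown only to illustrate the intended shape.
  #define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
    V(Foo64Ld_w)                                              \
    V(Foo64St_w)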
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
index 454bfa9986..29f9b111db 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -345,9 +345,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Loong64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -1355,37 +1355,21 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  // On LoongArch64, int32 values are always kept sign-extended to 64 bits,
+  // so there is normally no need to sign-extend them here.
+  // However, when a host function is called in the simulator, an int32
+  // return value is not sign-extended to int64, because the simulator
+  // cannot tell whether the callee returns an int32 or an int64.
#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
- if ((value->opcode() == IrOpcode::kLoad ||
- value->opcode() == IrOpcode::kLoadImmutable) &&
- CanCover(node, value)) {
- // Generate sign-extending load.
- LoadRepresentation load_rep = LoadRepresentationOf(value->op());
- InstructionCode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
- break;
- case MachineRepresentation::kWord32:
- opcode = kLoong64Ld_w;
- break;
- default:
- UNREACHABLE();
- }
- EmitLoad(this, value, opcode, node);
- } else {
+ if (value->opcode() == IrOpcode::kCall) {
Loong64OperandGenerator g(this);
- Emit(kLoong64Sll_w, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.TempImmediate(0));
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
+ g.TempImmediate(0));
+ return;
}
-#else
- EmitIdentity(node);
#endif
+ EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
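As a rough C++ model (an assumption about the LoongArch64 convention that 32-bit results are kept sign-extended in 64-bit registers, consistent with the comment above), the emitted sll_w with a zero shift amount simply re-sign-extends the low 32 bits of the call's return value:

  #include <cstdint>

  // Models "sll_w rd, rs, 0": keep bits 31..0 and sign-extend them to 64 bits.
  int64_t ModelSllW0(int64_t rs) {
    return static_cast<int64_t>(static_cast<int32_t>(rs));
  }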
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 736248c824..97c9e0978e 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -809,13 +809,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 40f1ef3e98..3f0d8f9d39 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -11,369 +11,374 @@ namespace compiler {
// MIPS-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(MipsAdd) \
- V(MipsAddOvf) \
- V(MipsSub) \
- V(MipsSubOvf) \
- V(MipsMul) \
- V(MipsMulOvf) \
- V(MipsMulHigh) \
- V(MipsMulHighU) \
- V(MipsDiv) \
- V(MipsDivU) \
- V(MipsMod) \
- V(MipsModU) \
- V(MipsAnd) \
- V(MipsOr) \
- V(MipsNor) \
- V(MipsXor) \
- V(MipsClz) \
- V(MipsCtz) \
- V(MipsPopcnt) \
- V(MipsLsa) \
- V(MipsShl) \
- V(MipsShr) \
- V(MipsSar) \
- V(MipsShlPair) \
- V(MipsShrPair) \
- V(MipsSarPair) \
- V(MipsExt) \
- V(MipsIns) \
- V(MipsRor) \
- V(MipsMov) \
- V(MipsTst) \
- V(MipsCmp) \
- V(MipsCmpS) \
- V(MipsAddS) \
- V(MipsSubS) \
- V(MipsMulS) \
- V(MipsDivS) \
- V(MipsAbsS) \
- V(MipsSqrtS) \
- V(MipsMaxS) \
- V(MipsMinS) \
- V(MipsCmpD) \
- V(MipsAddD) \
- V(MipsSubD) \
- V(MipsMulD) \
- V(MipsDivD) \
- V(MipsModD) \
- V(MipsAbsD) \
- V(MipsSqrtD) \
- V(MipsMaxD) \
- V(MipsMinD) \
- V(MipsNegS) \
- V(MipsNegD) \
- V(MipsAddPair) \
- V(MipsSubPair) \
- V(MipsMulPair) \
- V(MipsMaddS) \
- V(MipsMaddD) \
- V(MipsMsubS) \
- V(MipsMsubD) \
- V(MipsFloat32RoundDown) \
- V(MipsFloat32RoundTruncate) \
- V(MipsFloat32RoundUp) \
- V(MipsFloat32RoundTiesEven) \
- V(MipsFloat64RoundDown) \
- V(MipsFloat64RoundTruncate) \
- V(MipsFloat64RoundUp) \
- V(MipsFloat64RoundTiesEven) \
- V(MipsCvtSD) \
- V(MipsCvtDS) \
- V(MipsTruncWD) \
- V(MipsRoundWD) \
- V(MipsFloorWD) \
- V(MipsCeilWD) \
- V(MipsTruncWS) \
- V(MipsRoundWS) \
- V(MipsFloorWS) \
- V(MipsCeilWS) \
- V(MipsTruncUwD) \
- V(MipsTruncUwS) \
- V(MipsCvtDW) \
- V(MipsCvtDUw) \
- V(MipsCvtSW) \
- V(MipsCvtSUw) \
- V(MipsLb) \
- V(MipsLbu) \
- V(MipsSb) \
- V(MipsLh) \
- V(MipsUlh) \
- V(MipsLhu) \
- V(MipsUlhu) \
- V(MipsSh) \
- V(MipsUsh) \
- V(MipsLw) \
- V(MipsUlw) \
- V(MipsSw) \
- V(MipsUsw) \
- V(MipsLwc1) \
- V(MipsUlwc1) \
- V(MipsSwc1) \
- V(MipsUswc1) \
- V(MipsLdc1) \
- V(MipsUldc1) \
- V(MipsSdc1) \
- V(MipsUsdc1) \
- V(MipsFloat64ExtractLowWord32) \
- V(MipsFloat64ExtractHighWord32) \
- V(MipsFloat64InsertLowWord32) \
- V(MipsFloat64InsertHighWord32) \
- V(MipsFloat64SilenceNaN) \
- V(MipsFloat32Max) \
- V(MipsFloat64Max) \
- V(MipsFloat32Min) \
- V(MipsFloat64Min) \
- V(MipsPush) \
- V(MipsPeek) \
- V(MipsStoreToStackSlot) \
- V(MipsByteSwap32) \
- V(MipsStackClaim) \
- V(MipsSeb) \
- V(MipsSeh) \
- V(MipsSync) \
- V(MipsS128Zero) \
- V(MipsI32x4Splat) \
- V(MipsI32x4ExtractLane) \
- V(MipsI32x4ReplaceLane) \
- V(MipsI32x4Add) \
- V(MipsI32x4Sub) \
- V(MipsF64x2Abs) \
- V(MipsF64x2Neg) \
- V(MipsF64x2Sqrt) \
- V(MipsF64x2Add) \
- V(MipsF64x2Sub) \
- V(MipsF64x2Mul) \
- V(MipsF64x2Div) \
- V(MipsF64x2Min) \
- V(MipsF64x2Max) \
- V(MipsF64x2Eq) \
- V(MipsF64x2Ne) \
- V(MipsF64x2Lt) \
- V(MipsF64x2Le) \
- V(MipsF64x2Pmin) \
- V(MipsF64x2Pmax) \
- V(MipsF64x2Ceil) \
- V(MipsF64x2Floor) \
- V(MipsF64x2Trunc) \
- V(MipsF64x2NearestInt) \
- V(MipsF64x2ConvertLowI32x4S) \
- V(MipsF64x2ConvertLowI32x4U) \
- V(MipsF64x2PromoteLowF32x4) \
- V(MipsI64x2Add) \
- V(MipsI64x2Sub) \
- V(MipsI64x2Mul) \
- V(MipsI64x2Neg) \
- V(MipsI64x2Shl) \
- V(MipsI64x2ShrS) \
- V(MipsI64x2ShrU) \
- V(MipsI64x2BitMask) \
- V(MipsI64x2Eq) \
- V(MipsI64x2Ne) \
- V(MipsI64x2GtS) \
- V(MipsI64x2GeS) \
- V(MipsI64x2Abs) \
- V(MipsI64x2SConvertI32x4Low) \
- V(MipsI64x2SConvertI32x4High) \
- V(MipsI64x2UConvertI32x4Low) \
- V(MipsI64x2UConvertI32x4High) \
- V(MipsI64x2ExtMulLowI32x4S) \
- V(MipsI64x2ExtMulHighI32x4S) \
- V(MipsI64x2ExtMulLowI32x4U) \
- V(MipsI64x2ExtMulHighI32x4U) \
- V(MipsF32x4Splat) \
- V(MipsF32x4ExtractLane) \
- V(MipsF32x4ReplaceLane) \
- V(MipsF32x4SConvertI32x4) \
- V(MipsF32x4UConvertI32x4) \
- V(MipsF32x4DemoteF64x2Zero) \
- V(MipsI32x4Mul) \
- V(MipsI32x4MaxS) \
- V(MipsI32x4MinS) \
- V(MipsI32x4Eq) \
- V(MipsI32x4Ne) \
- V(MipsI32x4Shl) \
- V(MipsI32x4ShrS) \
- V(MipsI32x4ShrU) \
- V(MipsI32x4MaxU) \
- V(MipsI32x4MinU) \
- V(MipsF64x2Splat) \
- V(MipsF64x2ExtractLane) \
- V(MipsF64x2ReplaceLane) \
- V(MipsF32x4Abs) \
- V(MipsF32x4Neg) \
- V(MipsF32x4Sqrt) \
- V(MipsF32x4RecipApprox) \
- V(MipsF32x4RecipSqrtApprox) \
- V(MipsF32x4Add) \
- V(MipsF32x4Sub) \
- V(MipsF32x4Mul) \
- V(MipsF32x4Div) \
- V(MipsF32x4Max) \
- V(MipsF32x4Min) \
- V(MipsF32x4Eq) \
- V(MipsF32x4Ne) \
- V(MipsF32x4Lt) \
- V(MipsF32x4Le) \
- V(MipsF32x4Pmin) \
- V(MipsF32x4Pmax) \
- V(MipsF32x4Ceil) \
- V(MipsF32x4Floor) \
- V(MipsF32x4Trunc) \
- V(MipsF32x4NearestInt) \
- V(MipsI32x4SConvertF32x4) \
- V(MipsI32x4UConvertF32x4) \
- V(MipsI32x4Neg) \
- V(MipsI32x4GtS) \
- V(MipsI32x4GeS) \
- V(MipsI32x4GtU) \
- V(MipsI32x4GeU) \
- V(MipsI32x4Abs) \
- V(MipsI32x4BitMask) \
- V(MipsI32x4DotI16x8S) \
- V(MipsI32x4ExtMulLowI16x8S) \
- V(MipsI32x4ExtMulHighI16x8S) \
- V(MipsI32x4ExtMulLowI16x8U) \
- V(MipsI32x4ExtMulHighI16x8U) \
- V(MipsI32x4TruncSatF64x2SZero) \
- V(MipsI32x4TruncSatF64x2UZero) \
- V(MipsI32x4ExtAddPairwiseI16x8S) \
- V(MipsI32x4ExtAddPairwiseI16x8U) \
- V(MipsI16x8Splat) \
- V(MipsI16x8ExtractLaneU) \
- V(MipsI16x8ExtractLaneS) \
- V(MipsI16x8ReplaceLane) \
- V(MipsI16x8Neg) \
- V(MipsI16x8Shl) \
- V(MipsI16x8ShrS) \
- V(MipsI16x8ShrU) \
- V(MipsI16x8Add) \
- V(MipsI16x8AddSatS) \
- V(MipsI16x8Sub) \
- V(MipsI16x8SubSatS) \
- V(MipsI16x8Mul) \
- V(MipsI16x8MaxS) \
- V(MipsI16x8MinS) \
- V(MipsI16x8Eq) \
- V(MipsI16x8Ne) \
- V(MipsI16x8GtS) \
- V(MipsI16x8GeS) \
- V(MipsI16x8AddSatU) \
- V(MipsI16x8SubSatU) \
- V(MipsI16x8MaxU) \
- V(MipsI16x8MinU) \
- V(MipsI16x8GtU) \
- V(MipsI16x8GeU) \
- V(MipsI16x8RoundingAverageU) \
- V(MipsI16x8Abs) \
- V(MipsI16x8BitMask) \
- V(MipsI16x8Q15MulRSatS) \
- V(MipsI16x8ExtMulLowI8x16S) \
- V(MipsI16x8ExtMulHighI8x16S) \
- V(MipsI16x8ExtMulLowI8x16U) \
- V(MipsI16x8ExtMulHighI8x16U) \
- V(MipsI16x8ExtAddPairwiseI8x16S) \
- V(MipsI16x8ExtAddPairwiseI8x16U) \
- V(MipsI8x16Splat) \
- V(MipsI8x16ExtractLaneU) \
- V(MipsI8x16ExtractLaneS) \
- V(MipsI8x16ReplaceLane) \
- V(MipsI8x16Neg) \
- V(MipsI8x16Shl) \
- V(MipsI8x16ShrS) \
- V(MipsI8x16Add) \
- V(MipsI8x16AddSatS) \
- V(MipsI8x16Sub) \
- V(MipsI8x16SubSatS) \
- V(MipsI8x16MaxS) \
- V(MipsI8x16MinS) \
- V(MipsI8x16Eq) \
- V(MipsI8x16Ne) \
- V(MipsI8x16GtS) \
- V(MipsI8x16GeS) \
- V(MipsI8x16ShrU) \
- V(MipsI8x16AddSatU) \
- V(MipsI8x16SubSatU) \
- V(MipsI8x16MaxU) \
- V(MipsI8x16MinU) \
- V(MipsI8x16GtU) \
- V(MipsI8x16GeU) \
- V(MipsI8x16RoundingAverageU) \
- V(MipsI8x16Abs) \
- V(MipsI8x16Popcnt) \
- V(MipsI8x16BitMask) \
- V(MipsS128And) \
- V(MipsS128Or) \
- V(MipsS128Xor) \
- V(MipsS128Not) \
- V(MipsS128Select) \
- V(MipsS128AndNot) \
- V(MipsI64x2AllTrue) \
- V(MipsI32x4AllTrue) \
- V(MipsI16x8AllTrue) \
- V(MipsI8x16AllTrue) \
- V(MipsV128AnyTrue) \
- V(MipsS32x4InterleaveRight) \
- V(MipsS32x4InterleaveLeft) \
- V(MipsS32x4PackEven) \
- V(MipsS32x4PackOdd) \
- V(MipsS32x4InterleaveEven) \
- V(MipsS32x4InterleaveOdd) \
- V(MipsS32x4Shuffle) \
- V(MipsS16x8InterleaveRight) \
- V(MipsS16x8InterleaveLeft) \
- V(MipsS16x8PackEven) \
- V(MipsS16x8PackOdd) \
- V(MipsS16x8InterleaveEven) \
- V(MipsS16x8InterleaveOdd) \
- V(MipsS16x4Reverse) \
- V(MipsS16x2Reverse) \
- V(MipsS8x16InterleaveRight) \
- V(MipsS8x16InterleaveLeft) \
- V(MipsS8x16PackEven) \
- V(MipsS8x16PackOdd) \
- V(MipsS8x16InterleaveEven) \
- V(MipsS8x16InterleaveOdd) \
- V(MipsI8x16Shuffle) \
- V(MipsI8x16Swizzle) \
- V(MipsS8x16Concat) \
- V(MipsS8x8Reverse) \
- V(MipsS8x4Reverse) \
- V(MipsS8x2Reverse) \
- V(MipsS128Load8Splat) \
- V(MipsS128Load16Splat) \
- V(MipsS128Load32Splat) \
- V(MipsS128Load64Splat) \
- V(MipsS128Load8x8S) \
- V(MipsS128Load8x8U) \
- V(MipsS128Load16x4S) \
- V(MipsS128Load16x4U) \
- V(MipsS128Load32x2S) \
- V(MipsS128Load32x2U) \
- V(MipsMsaLd) \
- V(MipsMsaSt) \
- V(MipsI32x4SConvertI16x8Low) \
- V(MipsI32x4SConvertI16x8High) \
- V(MipsI32x4UConvertI16x8Low) \
- V(MipsI32x4UConvertI16x8High) \
- V(MipsI16x8SConvertI8x16Low) \
- V(MipsI16x8SConvertI8x16High) \
- V(MipsI16x8SConvertI32x4) \
- V(MipsI16x8UConvertI32x4) \
- V(MipsI16x8UConvertI8x16Low) \
- V(MipsI16x8UConvertI8x16High) \
- V(MipsI8x16SConvertI16x8) \
- V(MipsI8x16UConvertI16x8) \
- V(MipsWord32AtomicPairLoad) \
- V(MipsWord32AtomicPairStore) \
- V(MipsWord32AtomicPairAdd) \
- V(MipsWord32AtomicPairSub) \
- V(MipsWord32AtomicPairAnd) \
- V(MipsWord32AtomicPairOr) \
- V(MipsWord32AtomicPairXor) \
- V(MipsWord32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(MipsAdd) \
+ V(MipsAddOvf) \
+ V(MipsSub) \
+ V(MipsSubOvf) \
+ V(MipsMul) \
+ V(MipsMulOvf) \
+ V(MipsMulHigh) \
+ V(MipsMulHighU) \
+ V(MipsDiv) \
+ V(MipsDivU) \
+ V(MipsMod) \
+ V(MipsModU) \
+ V(MipsAnd) \
+ V(MipsOr) \
+ V(MipsNor) \
+ V(MipsXor) \
+ V(MipsClz) \
+ V(MipsCtz) \
+ V(MipsPopcnt) \
+ V(MipsLsa) \
+ V(MipsShl) \
+ V(MipsShr) \
+ V(MipsSar) \
+ V(MipsShlPair) \
+ V(MipsShrPair) \
+ V(MipsSarPair) \
+ V(MipsExt) \
+ V(MipsIns) \
+ V(MipsRor) \
+ V(MipsMov) \
+ V(MipsTst) \
+ V(MipsCmp) \
+ V(MipsCmpS) \
+ V(MipsAddS) \
+ V(MipsSubS) \
+ V(MipsMulS) \
+ V(MipsDivS) \
+ V(MipsAbsS) \
+ V(MipsSqrtS) \
+ V(MipsMaxS) \
+ V(MipsMinS) \
+ V(MipsCmpD) \
+ V(MipsAddD) \
+ V(MipsSubD) \
+ V(MipsMulD) \
+ V(MipsDivD) \
+ V(MipsModD) \
+ V(MipsAbsD) \
+ V(MipsSqrtD) \
+ V(MipsMaxD) \
+ V(MipsMinD) \
+ V(MipsNegS) \
+ V(MipsNegD) \
+ V(MipsAddPair) \
+ V(MipsSubPair) \
+ V(MipsMulPair) \
+ V(MipsMaddS) \
+ V(MipsMaddD) \
+ V(MipsMsubS) \
+ V(MipsMsubD) \
+ V(MipsFloat32RoundDown) \
+ V(MipsFloat32RoundTruncate) \
+ V(MipsFloat32RoundUp) \
+ V(MipsFloat32RoundTiesEven) \
+ V(MipsFloat64RoundDown) \
+ V(MipsFloat64RoundTruncate) \
+ V(MipsFloat64RoundUp) \
+ V(MipsFloat64RoundTiesEven) \
+ V(MipsCvtSD) \
+ V(MipsCvtDS) \
+ V(MipsTruncWD) \
+ V(MipsRoundWD) \
+ V(MipsFloorWD) \
+ V(MipsCeilWD) \
+ V(MipsTruncWS) \
+ V(MipsRoundWS) \
+ V(MipsFloorWS) \
+ V(MipsCeilWS) \
+ V(MipsTruncUwD) \
+ V(MipsTruncUwS) \
+ V(MipsCvtDW) \
+ V(MipsCvtDUw) \
+ V(MipsCvtSW) \
+ V(MipsCvtSUw) \
+ V(MipsLb) \
+ V(MipsLbu) \
+ V(MipsSb) \
+ V(MipsLh) \
+ V(MipsUlh) \
+ V(MipsLhu) \
+ V(MipsUlhu) \
+ V(MipsSh) \
+ V(MipsUsh) \
+ V(MipsLw) \
+ V(MipsUlw) \
+ V(MipsSw) \
+ V(MipsUsw) \
+ V(MipsLwc1) \
+ V(MipsUlwc1) \
+ V(MipsSwc1) \
+ V(MipsUswc1) \
+ V(MipsLdc1) \
+ V(MipsUldc1) \
+ V(MipsSdc1) \
+ V(MipsUsdc1) \
+ V(MipsFloat64ExtractLowWord32) \
+ V(MipsFloat64ExtractHighWord32) \
+ V(MipsFloat64InsertLowWord32) \
+ V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64SilenceNaN) \
+ V(MipsFloat32Max) \
+ V(MipsFloat64Max) \
+ V(MipsFloat32Min) \
+ V(MipsFloat64Min) \
+ V(MipsPush) \
+ V(MipsPeek) \
+ V(MipsStoreToStackSlot) \
+ V(MipsByteSwap32) \
+ V(MipsStackClaim) \
+ V(MipsSeb) \
+ V(MipsSeh) \
+ V(MipsSync) \
+ V(MipsS128Zero) \
+ V(MipsI32x4Splat) \
+ V(MipsI32x4ExtractLane) \
+ V(MipsI32x4ReplaceLane) \
+ V(MipsI32x4Add) \
+ V(MipsI32x4Sub) \
+ V(MipsF64x2Abs) \
+ V(MipsF64x2Neg) \
+ V(MipsF64x2Sqrt) \
+ V(MipsF64x2Add) \
+ V(MipsF64x2Sub) \
+ V(MipsF64x2Mul) \
+ V(MipsF64x2Div) \
+ V(MipsF64x2Min) \
+ V(MipsF64x2Max) \
+ V(MipsF64x2Eq) \
+ V(MipsF64x2Ne) \
+ V(MipsF64x2Lt) \
+ V(MipsF64x2Le) \
+ V(MipsF64x2Pmin) \
+ V(MipsF64x2Pmax) \
+ V(MipsF64x2Ceil) \
+ V(MipsF64x2Floor) \
+ V(MipsF64x2Trunc) \
+ V(MipsF64x2NearestInt) \
+ V(MipsF64x2ConvertLowI32x4S) \
+ V(MipsF64x2ConvertLowI32x4U) \
+ V(MipsF64x2PromoteLowF32x4) \
+ V(MipsI64x2Add) \
+ V(MipsI64x2Sub) \
+ V(MipsI64x2Mul) \
+ V(MipsI64x2Neg) \
+ V(MipsI64x2Shl) \
+ V(MipsI64x2ShrS) \
+ V(MipsI64x2ShrU) \
+ V(MipsI64x2BitMask) \
+ V(MipsI64x2Eq) \
+ V(MipsI64x2Ne) \
+ V(MipsI64x2GtS) \
+ V(MipsI64x2GeS) \
+ V(MipsI64x2Abs) \
+ V(MipsI64x2SConvertI32x4Low) \
+ V(MipsI64x2SConvertI32x4High) \
+ V(MipsI64x2UConvertI32x4Low) \
+ V(MipsI64x2UConvertI32x4High) \
+ V(MipsI64x2ExtMulLowI32x4S) \
+ V(MipsI64x2ExtMulHighI32x4S) \
+ V(MipsI64x2ExtMulLowI32x4U) \
+ V(MipsI64x2ExtMulHighI32x4U) \
+ V(MipsF32x4Splat) \
+ V(MipsF32x4ExtractLane) \
+ V(MipsF32x4ReplaceLane) \
+ V(MipsF32x4SConvertI32x4) \
+ V(MipsF32x4UConvertI32x4) \
+ V(MipsF32x4DemoteF64x2Zero) \
+ V(MipsI32x4Mul) \
+ V(MipsI32x4MaxS) \
+ V(MipsI32x4MinS) \
+ V(MipsI32x4Eq) \
+ V(MipsI32x4Ne) \
+ V(MipsI32x4Shl) \
+ V(MipsI32x4ShrS) \
+ V(MipsI32x4ShrU) \
+ V(MipsI32x4MaxU) \
+ V(MipsI32x4MinU) \
+ V(MipsF64x2Splat) \
+ V(MipsF64x2ExtractLane) \
+ V(MipsF64x2ReplaceLane) \
+ V(MipsF32x4Abs) \
+ V(MipsF32x4Neg) \
+ V(MipsF32x4Sqrt) \
+ V(MipsF32x4RecipApprox) \
+ V(MipsF32x4RecipSqrtApprox) \
+ V(MipsF32x4Add) \
+ V(MipsF32x4Sub) \
+ V(MipsF32x4Mul) \
+ V(MipsF32x4Div) \
+ V(MipsF32x4Max) \
+ V(MipsF32x4Min) \
+ V(MipsF32x4Eq) \
+ V(MipsF32x4Ne) \
+ V(MipsF32x4Lt) \
+ V(MipsF32x4Le) \
+ V(MipsF32x4Pmin) \
+ V(MipsF32x4Pmax) \
+ V(MipsF32x4Ceil) \
+ V(MipsF32x4Floor) \
+ V(MipsF32x4Trunc) \
+ V(MipsF32x4NearestInt) \
+ V(MipsI32x4SConvertF32x4) \
+ V(MipsI32x4UConvertF32x4) \
+ V(MipsI32x4Neg) \
+ V(MipsI32x4GtS) \
+ V(MipsI32x4GeS) \
+ V(MipsI32x4GtU) \
+ V(MipsI32x4GeU) \
+ V(MipsI32x4Abs) \
+ V(MipsI32x4BitMask) \
+ V(MipsI32x4DotI16x8S) \
+ V(MipsI32x4ExtMulLowI16x8S) \
+ V(MipsI32x4ExtMulHighI16x8S) \
+ V(MipsI32x4ExtMulLowI16x8U) \
+ V(MipsI32x4ExtMulHighI16x8U) \
+ V(MipsI32x4TruncSatF64x2SZero) \
+ V(MipsI32x4TruncSatF64x2UZero) \
+ V(MipsI32x4ExtAddPairwiseI16x8S) \
+ V(MipsI32x4ExtAddPairwiseI16x8U) \
+ V(MipsI16x8Splat) \
+ V(MipsI16x8ExtractLaneU) \
+ V(MipsI16x8ExtractLaneS) \
+ V(MipsI16x8ReplaceLane) \
+ V(MipsI16x8Neg) \
+ V(MipsI16x8Shl) \
+ V(MipsI16x8ShrS) \
+ V(MipsI16x8ShrU) \
+ V(MipsI16x8Add) \
+ V(MipsI16x8AddSatS) \
+ V(MipsI16x8Sub) \
+ V(MipsI16x8SubSatS) \
+ V(MipsI16x8Mul) \
+ V(MipsI16x8MaxS) \
+ V(MipsI16x8MinS) \
+ V(MipsI16x8Eq) \
+ V(MipsI16x8Ne) \
+ V(MipsI16x8GtS) \
+ V(MipsI16x8GeS) \
+ V(MipsI16x8AddSatU) \
+ V(MipsI16x8SubSatU) \
+ V(MipsI16x8MaxU) \
+ V(MipsI16x8MinU) \
+ V(MipsI16x8GtU) \
+ V(MipsI16x8GeU) \
+ V(MipsI16x8RoundingAverageU) \
+ V(MipsI16x8Abs) \
+ V(MipsI16x8BitMask) \
+ V(MipsI16x8Q15MulRSatS) \
+ V(MipsI16x8ExtMulLowI8x16S) \
+ V(MipsI16x8ExtMulHighI8x16S) \
+ V(MipsI16x8ExtMulLowI8x16U) \
+ V(MipsI16x8ExtMulHighI8x16U) \
+ V(MipsI16x8ExtAddPairwiseI8x16S) \
+ V(MipsI16x8ExtAddPairwiseI8x16U) \
+ V(MipsI8x16Splat) \
+ V(MipsI8x16ExtractLaneU) \
+ V(MipsI8x16ExtractLaneS) \
+ V(MipsI8x16ReplaceLane) \
+ V(MipsI8x16Neg) \
+ V(MipsI8x16Shl) \
+ V(MipsI8x16ShrS) \
+ V(MipsI8x16Add) \
+ V(MipsI8x16AddSatS) \
+ V(MipsI8x16Sub) \
+ V(MipsI8x16SubSatS) \
+ V(MipsI8x16MaxS) \
+ V(MipsI8x16MinS) \
+ V(MipsI8x16Eq) \
+ V(MipsI8x16Ne) \
+ V(MipsI8x16GtS) \
+ V(MipsI8x16GeS) \
+ V(MipsI8x16ShrU) \
+ V(MipsI8x16AddSatU) \
+ V(MipsI8x16SubSatU) \
+ V(MipsI8x16MaxU) \
+ V(MipsI8x16MinU) \
+ V(MipsI8x16GtU) \
+ V(MipsI8x16GeU) \
+ V(MipsI8x16RoundingAverageU) \
+ V(MipsI8x16Abs) \
+ V(MipsI8x16Popcnt) \
+ V(MipsI8x16BitMask) \
+ V(MipsS128And) \
+ V(MipsS128Or) \
+ V(MipsS128Xor) \
+ V(MipsS128Not) \
+ V(MipsS128Select) \
+ V(MipsS128AndNot) \
+ V(MipsI64x2AllTrue) \
+ V(MipsI32x4AllTrue) \
+ V(MipsI16x8AllTrue) \
+ V(MipsI8x16AllTrue) \
+ V(MipsV128AnyTrue) \
+ V(MipsS32x4InterleaveRight) \
+ V(MipsS32x4InterleaveLeft) \
+ V(MipsS32x4PackEven) \
+ V(MipsS32x4PackOdd) \
+ V(MipsS32x4InterleaveEven) \
+ V(MipsS32x4InterleaveOdd) \
+ V(MipsS32x4Shuffle) \
+ V(MipsS16x8InterleaveRight) \
+ V(MipsS16x8InterleaveLeft) \
+ V(MipsS16x8PackEven) \
+ V(MipsS16x8PackOdd) \
+ V(MipsS16x8InterleaveEven) \
+ V(MipsS16x8InterleaveOdd) \
+ V(MipsS16x4Reverse) \
+ V(MipsS16x2Reverse) \
+ V(MipsS8x16InterleaveRight) \
+ V(MipsS8x16InterleaveLeft) \
+ V(MipsS8x16PackEven) \
+ V(MipsS8x16PackOdd) \
+ V(MipsS8x16InterleaveEven) \
+ V(MipsS8x16InterleaveOdd) \
+ V(MipsI8x16Shuffle) \
+ V(MipsI8x16Swizzle) \
+ V(MipsS8x16Concat) \
+ V(MipsS8x8Reverse) \
+ V(MipsS8x4Reverse) \
+ V(MipsS8x2Reverse) \
+ V(MipsS128Load8Splat) \
+ V(MipsS128Load16Splat) \
+ V(MipsS128Load32Splat) \
+ V(MipsS128Load64Splat) \
+ V(MipsS128Load8x8S) \
+ V(MipsS128Load8x8U) \
+ V(MipsS128Load16x4S) \
+ V(MipsS128Load16x4U) \
+ V(MipsS128Load32x2S) \
+ V(MipsS128Load32x2U) \
+ V(MipsMsaLd) \
+ V(MipsMsaSt) \
+ V(MipsI32x4SConvertI16x8Low) \
+ V(MipsI32x4SConvertI16x8High) \
+ V(MipsI32x4UConvertI16x8Low) \
+ V(MipsI32x4UConvertI16x8High) \
+ V(MipsI16x8SConvertI8x16Low) \
+ V(MipsI16x8SConvertI8x16High) \
+ V(MipsI16x8SConvertI32x4) \
+ V(MipsI16x8UConvertI32x4) \
+ V(MipsI16x8UConvertI8x16Low) \
+ V(MipsI16x8UConvertI8x16High) \
+ V(MipsI8x16SConvertI16x8) \
+ V(MipsI8x16UConvertI16x8) \
+ V(MipsWord32AtomicPairLoad) \
+ V(MipsWord32AtomicPairStore) \
+ V(MipsWord32AtomicPairAdd) \
+ V(MipsWord32AtomicPairSub) \
+ V(MipsWord32AtomicPairAnd) \
+ V(MipsWord32AtomicPairOr) \
+ V(MipsWord32AtomicPairXor) \
+ V(MipsWord32AtomicPairExchange) \
V(MipsWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index aeb1756227..d59392b40a 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -1427,7 +1427,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchComment:
case kArchDeoptimize:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 477c791ca0..39d1feef96 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -278,9 +278,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
MipsOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void InstructionSelector::VisitLoadTransform(Node* node) {
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index f6fccd43d2..5d6a745407 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -770,13 +770,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -1032,14 +1032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64And32:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Or32:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Nor:
if (instr->InputAt(1)->IsRegister()) {
@@ -1052,11 +1050,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Nor32:
if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
} else {
DCHECK_EQ(0, i.InputOperand(1).immediate());
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
}
break;
case kMips64Xor:
@@ -1103,23 +1099,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
- __ srl(i.OutputRegister(), i.OutputRegister(),
+ __ srl(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
- __ sra(i.OutputRegister(), i.OutputRegister(),
+ __ sra(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 30d7f5af75..003b6bd6c2 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -11,393 +11,398 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64Sync) \
- V(Mips64AssertEqual) \
- V(Mips64S128Const) \
- V(Mips64S128Zero) \
- V(Mips64S128AllOnes) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4Sub) \
- V(Mips64F64x2Abs) \
- V(Mips64F64x2Neg) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F64x2Sqrt) \
- V(Mips64F64x2Add) \
- V(Mips64F64x2Sub) \
- V(Mips64F64x2Mul) \
- V(Mips64F64x2Div) \
- V(Mips64F64x2Min) \
- V(Mips64F64x2Max) \
- V(Mips64F64x2Eq) \
- V(Mips64F64x2Ne) \
- V(Mips64F64x2Lt) \
- V(Mips64F64x2Le) \
- V(Mips64F64x2Splat) \
- V(Mips64F64x2ExtractLane) \
- V(Mips64F64x2ReplaceLane) \
- V(Mips64F64x2Pmin) \
- V(Mips64F64x2Pmax) \
- V(Mips64F64x2Ceil) \
- V(Mips64F64x2Floor) \
- V(Mips64F64x2Trunc) \
- V(Mips64F64x2NearestInt) \
- V(Mips64F64x2ConvertLowI32x4S) \
- V(Mips64F64x2ConvertLowI32x4U) \
- V(Mips64F64x2PromoteLowF32x4) \
- V(Mips64I64x2Splat) \
- V(Mips64I64x2ExtractLane) \
- V(Mips64I64x2ReplaceLane) \
- V(Mips64I64x2Add) \
- V(Mips64I64x2Sub) \
- V(Mips64I64x2Mul) \
- V(Mips64I64x2Neg) \
- V(Mips64I64x2Shl) \
- V(Mips64I64x2ShrS) \
- V(Mips64I64x2ShrU) \
- V(Mips64I64x2BitMask) \
- V(Mips64I64x2Eq) \
- V(Mips64I64x2Ne) \
- V(Mips64I64x2GtS) \
- V(Mips64I64x2GeS) \
- V(Mips64I64x2Abs) \
- V(Mips64I64x2SConvertI32x4Low) \
- V(Mips64I64x2SConvertI32x4High) \
- V(Mips64I64x2UConvertI32x4Low) \
- V(Mips64I64x2UConvertI32x4High) \
- V(Mips64ExtMulLow) \
- V(Mips64ExtMulHigh) \
- V(Mips64ExtAddPairwise) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4Sqrt) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Div) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64F32x4Pmin) \
- V(Mips64F32x4Pmax) \
- V(Mips64F32x4Ceil) \
- V(Mips64F32x4Floor) \
- V(Mips64F32x4Trunc) \
- V(Mips64F32x4NearestInt) \
- V(Mips64F32x4DemoteF64x2Zero) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I32x4Abs) \
- V(Mips64I32x4BitMask) \
- V(Mips64I32x4DotI16x8S) \
- V(Mips64I32x4TruncSatF64x2SZero) \
- V(Mips64I32x4TruncSatF64x2UZero) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLaneU) \
- V(Mips64I16x8ExtractLaneS) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSatS) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSatS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSatU) \
- V(Mips64I16x8SubSatU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I16x8RoundingAverageU) \
- V(Mips64I16x8Abs) \
- V(Mips64I16x8BitMask) \
- V(Mips64I16x8Q15MulRSatS) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLaneU) \
- V(Mips64I8x16ExtractLaneS) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSatS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSatS) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSatU) \
- V(Mips64I8x16SubSatU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64I8x16RoundingAverageU) \
- V(Mips64I8x16Abs) \
- V(Mips64I8x16Popcnt) \
- V(Mips64I8x16BitMask) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S128AndNot) \
- V(Mips64I64x2AllTrue) \
- V(Mips64I32x4AllTrue) \
- V(Mips64I16x8AllTrue) \
- V(Mips64I8x16AllTrue) \
- V(Mips64V128AnyTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64I8x16Shuffle) \
- V(Mips64I8x16Swizzle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64S128LoadSplat) \
- V(Mips64S128Load8x8S) \
- V(Mips64S128Load8x8U) \
- V(Mips64S128Load16x4S) \
- V(Mips64S128Load16x4U) \
- V(Mips64S128Load32x2S) \
- V(Mips64S128Load32x2U) \
- V(Mips64S128Load32Zero) \
- V(Mips64S128Load64Zero) \
- V(Mips64S128LoadLane) \
- V(Mips64S128StoreLane) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8) \
- V(Mips64StoreCompressTagged) \
- V(Mips64Word64AtomicLoadUint64) \
- V(Mips64Word64AtomicStoreWord64) \
- V(Mips64Word64AtomicAddUint64) \
- V(Mips64Word64AtomicSubUint64) \
- V(Mips64Word64AtomicAndUint64) \
- V(Mips64Word64AtomicOrUint64) \
- V(Mips64Word64AtomicXorUint64) \
- V(Mips64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64Sync) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Const) \
+ V(Mips64S128Zero) \
+ V(Mips64S128AllOnes) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F64x2Abs) \
+ V(Mips64F64x2Neg) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F64x2Sqrt) \
+ V(Mips64F64x2Add) \
+ V(Mips64F64x2Sub) \
+ V(Mips64F64x2Mul) \
+ V(Mips64F64x2Div) \
+ V(Mips64F64x2Min) \
+ V(Mips64F64x2Max) \
+ V(Mips64F64x2Eq) \
+ V(Mips64F64x2Ne) \
+ V(Mips64F64x2Lt) \
+ V(Mips64F64x2Le) \
+ V(Mips64F64x2Splat) \
+ V(Mips64F64x2ExtractLane) \
+ V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
+ V(Mips64F64x2Ceil) \
+ V(Mips64F64x2Floor) \
+ V(Mips64F64x2Trunc) \
+ V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
+ V(Mips64I64x2Splat) \
+ V(Mips64I64x2ExtractLane) \
+ V(Mips64I64x2ReplaceLane) \
+ V(Mips64I64x2Add) \
+ V(Mips64I64x2Sub) \
+ V(Mips64I64x2Mul) \
+ V(Mips64I64x2Neg) \
+ V(Mips64I64x2Shl) \
+ V(Mips64I64x2ShrS) \
+ V(Mips64I64x2ShrU) \
+ V(Mips64I64x2BitMask) \
+ V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
+ V(Mips64ExtMulLow) \
+ V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Div) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
+ V(Mips64F32x4Ceil) \
+ V(Mips64F32x4Floor) \
+ V(Mips64F32x4Trunc) \
+ V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLaneU) \
+ V(Mips64I16x8ExtractLaneS) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSatS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSatS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I16x8RoundingAverageU) \
+ V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
+ V(Mips64I16x8Q15MulRSatS) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLaneU) \
+ V(Mips64I8x16ExtractLaneS) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSatS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSatS) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64I8x16RoundingAverageU) \
+ V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
+ V(Mips64I8x16BitMask) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S128AndNot) \
+ V(Mips64I64x2AllTrue) \
+ V(Mips64I32x4AllTrue) \
+ V(Mips64I16x8AllTrue) \
+ V(Mips64I8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64I8x16Shuffle) \
+ V(Mips64I8x16Swizzle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64S128LoadSplat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
+ V(Mips64S128LoadLane) \
+ V(Mips64S128StoreLane) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64StoreCompressTagged) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint64) \
V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
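
Each TARGET_ARCH_OPCODE_LIST macro in this patch is an X-macro: it receives another macro V and applies it once per opcode, so the same list can stamp out the ArchOpcode enum values, the printable opcode names, and switch cases in the code generator. A minimal stand-alone sketch of that expansion follows; SAMPLE_OPCODE_LIST, SampleOpcode and kSampleOpcodeNames are hypothetical names for illustration only, not the definitions in instruction-codes.h.

    #include <cstdio>

    // Hypothetical miniature of a target opcode list.
    #define SAMPLE_OPCODE_LIST(V) \
      V(Mips64Add)                \
      V(Mips64Sub)                \
      V(Mips64Dshl)

    // Expansion 1: an enum with one enumerator per opcode.
    #define DECLARE_ENUM(Name) k##Name,
    enum SampleOpcode { SAMPLE_OPCODE_LIST(DECLARE_ENUM) kSampleOpcodeCount };
    #undef DECLARE_ENUM

    // Expansion 2: a parallel table of opcode names for disassembly/tracing.
    #define DECLARE_NAME(Name) #Name,
    const char* const kSampleOpcodeNames[] = {SAMPLE_OPCODE_LIST(DECLARE_NAME)};
    #undef DECLARE_NAME

    int main() {
      std::printf("%s = %d\n", kSampleOpcodeNames[kMips64Dshl], kMips64Dshl);
      return 0;
    }
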
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index f79e334ed6..734009ca30 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -1301,7 +1301,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchJumpLatency();
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 192f82c9db..93c123bd65 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -311,14 +311,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
}
- if (cont->IsDeoptimize()) {
- // If we can deoptimize as a result of the binop, we need to make sure that
- // the deopt inputs are not overwritten by the binop result. One way
- // to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node);
- } else {
- outputs[output_count++] = g.DefineAsRegister(node);
- }
+ outputs[output_count++] = g.DefineAsRegister(node);
DCHECK_NE(0u, input_count);
DCHECK_EQ(1u, output_count);
@@ -356,9 +349,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -498,7 +491,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
+ opcode = kMips64Lw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -854,7 +847,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
- Emit(kMips64Dshl, g.DefineSameAsFirst(node),
+ Emit(kMips64Dshl, g.DefineAsRegister(node),
g.UseRegister(m.left().node()->InputAt(0)),
g.UseImmediate(m.right().node()));
return;
@@ -1446,44 +1439,49 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  // On MIPS64, int32 values are always kept sign-extended to 64 bits, so
+  // there is no need to sign-extend them here. However, when calling a host
+  // function in the simulator, if that function returns an int32, the
+  // simulator does not sign-extend the result to int64, because it cannot
+  // tell whether the function returns an int32 or an int64.
+#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
- if ((value->opcode() == IrOpcode::kLoad ||
- value->opcode() == IrOpcode::kLoadImmutable) &&
- CanCover(node, value)) {
- // Generate sign-extending load.
- LoadRepresentation load_rep = LoadRepresentationOf(value->op());
- InstructionCode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Lw;
- break;
- default:
- UNREACHABLE();
- }
- EmitLoad(this, value, opcode, node);
- } else {
+ if (value->opcode() == IrOpcode::kCall) {
Mips64OperandGenerator g(this);
- Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
+ return;
}
+#endif
+ EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
DCHECK_NE(node->opcode(), IrOpcode::kPhi);
switch (node->opcode()) {
- // 32-bit operations will write their result in a 64 bit register,
- // clearing the top 32 bits of the destination register.
- case IrOpcode::kUint32Div:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kUint32MulHigh:
+ // Comparisons only emit 0/1, so the upper 32 bits must be zero.
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
return true;
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ return is_uint31(mask);
+ }
+ return false;
+ }
+ case IrOpcode::kWord32Shr: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint8_t sa = m.right().ResolvedValue() & 0x1f;
+ return sa > 0;
+ }
+ return false;
+ }
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1491,7 +1489,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
return true;
default:
return false;
@@ -1507,10 +1504,24 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
+ IrOpcode::Value opcode = value->opcode();
+
+ if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kUnalignedLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ ArchOpcode arch_opcode =
+ opcode == IrOpcode::kUnalignedLoad ? kMips64Ulwu : kMips64Lwu;
+ if (load_rep.IsUnsigned() &&
+ load_rep.representation() == MachineRepresentation::kWord32) {
+ EmitLoad(this, value, arch_opcode, node);
+ return;
+ }
+ }
+
if (ZeroExtendsWord32ToWord64(value)) {
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
}
+
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
@@ -1528,7 +1539,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Int64BinopMatcher m(value);
if (m.right().IsInRange(32, 63)) {
// After smi untagging no need for truncate. Combine sequence.
- Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ Emit(kMips64Dsar, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()));
return;
@@ -1540,8 +1551,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
break;
}
}
- Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.TempImmediate(0), g.TempImmediate(32));
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
@@ -1836,7 +1847,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
+ opcode = kMips64Ulw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
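
A note on the instruction-selector changes above: they all rely on the MIPS64 convention that 32-bit operations leave their result sign-extended in a 64-bit register. That is why VisitChangeInt32ToInt64 can usually just EmitIdentity, why several DefineSameAsFirst constraints relax to DefineAsRegister, and why ZeroExtendsWord32ToWord64NoPhis only answers true for producers that provably clear bit 31, for example a Word32And whose mask satisfies is_uint31. A small host-side sketch of that reasoning, assuming nothing beyond standard C++ integer conversions:

    #include <cassert>
    #include <cstdint>

    // Model a MIPS64 GPR holding a word32 result: the architecture keeps it
    // sign-extended, so converting int32_t -> int64_t plays the "register".
    int64_t AsMips64Register(int32_t word32_result) {
      return static_cast<int64_t>(word32_result);
    }

    int main() {
      // ChangeInt32ToInt64 is an identity: the sign-extended register bits
      // are already exactly the int64 value the node must produce.
      assert(AsMips64Register(-5) == int64_t{-5});

      // Word32And with a mask where is_uint31(mask) holds clears bit 31, so
      // the sign-extended register value is also a correct zero-extension,
      // which is the new ZeroExtendsWord32ToWord64NoPhis case.
      uint32_t mask = 0x7FFFFFFFu;          // fits in 31 bits
      uint32_t anded = 0xC0000001u & mask;  // bit 31 cleared by the AND
      int64_t reg = AsMips64Register(static_cast<int32_t>(anded));
      assert(static_cast<uint64_t>(reg) == uint64_t{anded});  // zero-extended
      return 0;
    }
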
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 0bf29ba686..b91f6209f2 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -564,69 +564,35 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, _type) \
do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ lwsync(); \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(kScratchReg, operand); \
- __ bne(&binop, cr0); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, store_inst, \
- ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ lwsync(); \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(kScratchReg, operand); \
- __ bne(&binop, cr0); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst, \
- input_ext) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ input_ext(r0, i.InputRegister(2)); \
- __ lwsync(); \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ cmp_inst(i.OutputRegister(), r0, cr0); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst, \
- store_inst, ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ ext_instr(r0, i.InputRegister(2)); \
- __ lwsync(); \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ cmp_inst(i.OutputRegister(), r0, cr0); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
- __ sync(); \
+ auto bin_op = [&](Register dst, Register lhs, Register rhs) { \
+ if (std::is_signed<_type>::value) { \
+ switch (sizeof(_type)) { \
+ case 1: \
+ __ extsb(dst, lhs); \
+ break; \
+ case 2: \
+ __ extsh(dst, lhs); \
+ break; \
+ case 4: \
+ __ extsw(dst, lhs); \
+ break; \
+ case 8: \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
+ __ bin_inst(dst, dst, rhs); \
+ } else { \
+ __ bin_inst(dst, lhs, rhs); \
+ } \
+ }; \
+ MemOperand dst_operand = \
+ MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ __ AtomicOps<_type>(dst_operand, i.InputRegister(2), i.OutputRegister(), \
+ kScratchReg, bin_op); \
+ break; \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -888,8 +854,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ kScratchReg);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -932,8 +900,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
break;
case kArchCallCFunction: {
- int misc_field = MiscField::decode(instr->opcode());
- int num_parameters = misc_field;
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const fp_param_field = FPParamField::decode(instr->opcode());
+ int num_fp_parameters = fp_param_field;
bool has_function_descriptor = false;
int offset = 20 * kInstrSize;
@@ -954,10 +923,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX/PPC64BE Linux uses a function descriptor
- int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
- num_parameters = kNumParametersMask & misc_field;
+ int kNumFPParametersMask = kHasFunctionDescriptorBitMask - 1;
+ num_fp_parameters = kNumFPParametersMask & fp_param_field;
has_function_descriptor =
- (misc_field & kHasFunctionDescriptorBitMask) != 0;
+ (fp_param_field & kHasFunctionDescriptorBitMask) != 0;
// AIX may emit 2 extra Load instructions under CallCFunctionHelper
// due to having function descriptor.
if (has_function_descriptor) {
@@ -980,10 +949,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters, has_function_descriptor);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
+ has_function_descriptor);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters, has_function_descriptor);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
+ has_function_descriptor);
}
// TODO(miladfar): In the above block, kScratchReg must be populated with
// the strictly-correct PC, which is the return address at this spot. The
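
The two C-call hunks above replace the single MiscField parameter count with separate ParamField and FPParamField decodes, so the backend receives the GP and FP parameter counts independently. The sketch below only illustrates the general encode/decode idea behind such bit fields; the widths and shifts used (5 bits at offsets 22 and 27) are invented for the example and are not the real definitions from instruction-codes.h.

    #include <cstdint>

    // Hypothetical bit-field helper: pack a small integer into [kShift,
    // kShift + kSize) of a 32-bit instruction code and read it back.
    template <int kShift, int kSize>
    struct Field {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static constexpr uint32_t encode(uint32_t value) { return value << kShift; }
      static constexpr uint32_t decode(uint32_t word) {
        return (word & kMask) >> kShift;
      }
    };

    using HypotheticalParamField = Field<22, 5>;    // number of GP parameters
    using HypotheticalFPParamField = Field<27, 5>;  // number of FP parameters

    int main() {
      uint32_t opcode_bits = HypotheticalParamField::encode(3) |
                             HypotheticalFPParamField::encode(2);
      return HypotheticalParamField::decode(opcode_bits) == 3 &&
                     HypotheticalFPParamField::decode(opcode_bits) == 2
                 ? 0
                 : 1;
    }
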
@@ -1026,13 +997,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r4);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -2015,66 +1986,94 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadInt16:
UNREACHABLE();
case kAtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
- __ extsb(i.OutputRegister(0), i.OutputRegister(0));
+ __ AtomicExchange<int8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
+ __ AtomicExchange<uint8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kAtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
- __ extsh(i.OutputRegister(0), i.OutputRegister(0));
+ __ AtomicExchange<int16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
+ __ AtomicExchange<uint16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
+ __ AtomicExchange<uint32_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeWord64:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
+ __ AtomicExchange<uint64_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kAtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lbarx, stbcx, extsb);
+ __ AtomicCompareExchange<int8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lbarx, stbcx, ZeroExtByte);
+ __ AtomicCompareExchange<uint8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kAtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lharx, sthcx, extsh);
+ __ AtomicCompareExchange<int16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lharx, sthcx, ZeroExtHalfWord);
+ __ AtomicCompareExchange<uint16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx, ZeroExtWord32);
+ __ AtomicCompareExchange<uint32_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeWord64:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, ldarx, stdcx, mr);
+ __ AtomicCompareExchange<uint64_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kPPC_Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
- break; \
- case kPPC_Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
- break; \
- case kPPC_Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
- break; \
- case kPPC_Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
- break; \
- case kPPC_Atomic##op##Int32: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lwarx, stwcx, extsw); \
- break; \
- case kPPC_Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
- break; \
- case kPPC_Atomic##op##Int64: \
- case kPPC_Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kPPC_Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int8_t); \
+ break; \
+ case kPPC_Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint8_t); \
+ break; \
+ case kPPC_Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int16_t); \
+ break; \
+ case kPPC_Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint16_t); \
+ break; \
+ case kPPC_Atomic##op##Int32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int32_t); \
+ break; \
+ case kPPC_Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint32_t); \
+ break; \
+ case kPPC_Atomic##op##Int64: \
+ case kPPC_Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint64_t); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
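
From this point the PPC atomic cases no longer expand per-width load-reserve/store-conditional macro bodies; they call templated TurboAssembler helpers instead (AtomicExchange<T>, AtomicCompareExchange<T>, and, through the rewritten ASSEMBLE_ATOMIC_BINOP, an AtomicOps<T> retry loop). The following is a portable sketch of the behaviour the binop path is assumed to have, with std::atomic standing in for the load-reserve/store-conditional sequence: load the old value, treat it with the signedness of the element type, apply the operation, retry the store until it succeeds, and return the old value.

    #include <atomic>
    #include <cstdint>
    #include <type_traits>

    // Illustrative stand-in for the assumed AtomicOps<T> binop behaviour.
    // For narrow signed T the old value is used in sign-extended form,
    // matching the extsb/extsh/extsw calls in the rewritten macro; using the
    // C++ type T directly encodes the same thing here.
    template <typename T, typename Op>
    T AtomicBinop(std::atomic<T>* location, T operand, Op op) {
      static_assert(std::is_integral<T>::value, "integral element expected");
      T old_value = location->load(std::memory_order_relaxed);
      T new_value;
      do {
        new_value = op(old_value, operand);
      } while (!location->compare_exchange_weak(old_value, new_value,
                                                std::memory_order_seq_cst));
      return old_value;  // The instruction's output holds the old value.
    }

    int main() {
      std::atomic<int8_t> cell{-1};
      int8_t previous = AtomicBinop<int8_t>(
          &cell, int8_t{2},
          [](int8_t a, int8_t b) { return static_cast<int8_t>(a + b); });
      return previous == -1 && cell.load() == 1 ? 0 : 1;
    }
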
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 4182e8b71b..4f9003257f 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -11,406 +11,411 @@ namespace compiler {
// PPC-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(PPC_Peek) \
- V(PPC_Sync) \
- V(PPC_And) \
- V(PPC_AndComplement) \
- V(PPC_Or) \
- V(PPC_OrComplement) \
- V(PPC_Xor) \
- V(PPC_ShiftLeft32) \
- V(PPC_ShiftLeft64) \
- V(PPC_ShiftLeftPair) \
- V(PPC_ShiftRight32) \
- V(PPC_ShiftRight64) \
- V(PPC_ShiftRightPair) \
- V(PPC_ShiftRightAlg32) \
- V(PPC_ShiftRightAlg64) \
- V(PPC_ShiftRightAlgPair) \
- V(PPC_RotRight32) \
- V(PPC_RotRight64) \
- V(PPC_Not) \
- V(PPC_RotLeftAndMask32) \
- V(PPC_RotLeftAndClear64) \
- V(PPC_RotLeftAndClearLeft64) \
- V(PPC_RotLeftAndClearRight64) \
- V(PPC_Add32) \
- V(PPC_Add64) \
- V(PPC_AddWithOverflow32) \
- V(PPC_AddPair) \
- V(PPC_AddDouble) \
- V(PPC_Sub) \
- V(PPC_SubWithOverflow32) \
- V(PPC_SubPair) \
- V(PPC_SubDouble) \
- V(PPC_Mul32) \
- V(PPC_Mul32WithHigh32) \
- V(PPC_Mul64) \
- V(PPC_MulHigh32) \
- V(PPC_MulHighU32) \
- V(PPC_MulPair) \
- V(PPC_MulDouble) \
- V(PPC_Div32) \
- V(PPC_Div64) \
- V(PPC_DivU32) \
- V(PPC_DivU64) \
- V(PPC_DivDouble) \
- V(PPC_Mod32) \
- V(PPC_Mod64) \
- V(PPC_ModU32) \
- V(PPC_ModU64) \
- V(PPC_ModDouble) \
- V(PPC_Neg) \
- V(PPC_NegDouble) \
- V(PPC_SqrtDouble) \
- V(PPC_FloorDouble) \
- V(PPC_CeilDouble) \
- V(PPC_TruncateDouble) \
- V(PPC_RoundDouble) \
- V(PPC_MaxDouble) \
- V(PPC_MinDouble) \
- V(PPC_AbsDouble) \
- V(PPC_Cntlz32) \
- V(PPC_Cntlz64) \
- V(PPC_Popcnt32) \
- V(PPC_Popcnt64) \
- V(PPC_Cmp32) \
- V(PPC_Cmp64) \
- V(PPC_CmpDouble) \
- V(PPC_Tst32) \
- V(PPC_Tst64) \
- V(PPC_Push) \
- V(PPC_PushFrame) \
- V(PPC_StoreToStackSlot) \
- V(PPC_ExtendSignWord8) \
- V(PPC_ExtendSignWord16) \
- V(PPC_ExtendSignWord32) \
- V(PPC_Uint32ToUint64) \
- V(PPC_Int64ToInt32) \
- V(PPC_Int64ToFloat32) \
- V(PPC_Int64ToDouble) \
- V(PPC_Uint64ToFloat32) \
- V(PPC_Uint64ToDouble) \
- V(PPC_Int32ToFloat32) \
- V(PPC_Int32ToDouble) \
- V(PPC_Uint32ToFloat32) \
- V(PPC_Float32ToInt32) \
- V(PPC_Float32ToUint32) \
- V(PPC_Uint32ToDouble) \
- V(PPC_Float32ToDouble) \
- V(PPC_Float64SilenceNaN) \
- V(PPC_DoubleToInt32) \
- V(PPC_DoubleToUint32) \
- V(PPC_DoubleToInt64) \
- V(PPC_DoubleToUint64) \
- V(PPC_DoubleToFloat32) \
- V(PPC_DoubleExtractLowWord32) \
- V(PPC_DoubleExtractHighWord32) \
- V(PPC_DoubleInsertLowWord32) \
- V(PPC_DoubleInsertHighWord32) \
- V(PPC_DoubleConstruct) \
- V(PPC_BitcastInt32ToFloat32) \
- V(PPC_BitcastFloat32ToInt32) \
- V(PPC_BitcastInt64ToDouble) \
- V(PPC_BitcastDoubleToInt64) \
- V(PPC_LoadWordS8) \
- V(PPC_LoadWordU8) \
- V(PPC_LoadWordS16) \
- V(PPC_LoadWordU16) \
- V(PPC_LoadWordS32) \
- V(PPC_LoadWordU32) \
- V(PPC_LoadByteRev32) \
- V(PPC_LoadWord64) \
- V(PPC_LoadByteRev64) \
- V(PPC_LoadFloat32) \
- V(PPC_LoadDouble) \
- V(PPC_LoadSimd128) \
- V(PPC_LoadReverseSimd128RR) \
- V(PPC_StoreWord8) \
- V(PPC_StoreWord16) \
- V(PPC_StoreWord32) \
- V(PPC_StoreByteRev32) \
- V(PPC_StoreWord64) \
- V(PPC_StoreByteRev64) \
- V(PPC_StoreFloat32) \
- V(PPC_StoreDouble) \
- V(PPC_StoreSimd128) \
- V(PPC_ByteRev32) \
- V(PPC_ByteRev64) \
- V(PPC_AtomicExchangeUint8) \
- V(PPC_AtomicExchangeUint16) \
- V(PPC_AtomicExchangeWord32) \
- V(PPC_AtomicExchangeWord64) \
- V(PPC_AtomicCompareExchangeUint8) \
- V(PPC_AtomicCompareExchangeUint16) \
- V(PPC_AtomicCompareExchangeWord32) \
- V(PPC_AtomicCompareExchangeWord64) \
- V(PPC_AtomicAddUint8) \
- V(PPC_AtomicAddUint16) \
- V(PPC_AtomicAddUint32) \
- V(PPC_AtomicAddUint64) \
- V(PPC_AtomicAddInt8) \
- V(PPC_AtomicAddInt16) \
- V(PPC_AtomicAddInt32) \
- V(PPC_AtomicAddInt64) \
- V(PPC_AtomicSubUint8) \
- V(PPC_AtomicSubUint16) \
- V(PPC_AtomicSubUint32) \
- V(PPC_AtomicSubUint64) \
- V(PPC_AtomicSubInt8) \
- V(PPC_AtomicSubInt16) \
- V(PPC_AtomicSubInt32) \
- V(PPC_AtomicSubInt64) \
- V(PPC_AtomicAndUint8) \
- V(PPC_AtomicAndUint16) \
- V(PPC_AtomicAndUint32) \
- V(PPC_AtomicAndUint64) \
- V(PPC_AtomicAndInt8) \
- V(PPC_AtomicAndInt16) \
- V(PPC_AtomicAndInt32) \
- V(PPC_AtomicAndInt64) \
- V(PPC_AtomicOrUint8) \
- V(PPC_AtomicOrUint16) \
- V(PPC_AtomicOrUint32) \
- V(PPC_AtomicOrUint64) \
- V(PPC_AtomicOrInt8) \
- V(PPC_AtomicOrInt16) \
- V(PPC_AtomicOrInt32) \
- V(PPC_AtomicOrInt64) \
- V(PPC_AtomicXorUint8) \
- V(PPC_AtomicXorUint16) \
- V(PPC_AtomicXorUint32) \
- V(PPC_AtomicXorUint64) \
- V(PPC_AtomicXorInt8) \
- V(PPC_AtomicXorInt16) \
- V(PPC_AtomicXorInt32) \
- V(PPC_AtomicXorInt64) \
- V(PPC_F64x2Splat) \
- V(PPC_F64x2ExtractLane) \
- V(PPC_F64x2ReplaceLane) \
- V(PPC_F64x2Add) \
- V(PPC_F64x2Sub) \
- V(PPC_F64x2Mul) \
- V(PPC_F64x2Eq) \
- V(PPC_F64x2Ne) \
- V(PPC_F64x2Le) \
- V(PPC_F64x2Lt) \
- V(PPC_F64x2Abs) \
- V(PPC_F64x2Neg) \
- V(PPC_F64x2Sqrt) \
- V(PPC_F64x2Qfma) \
- V(PPC_F64x2Qfms) \
- V(PPC_F64x2Div) \
- V(PPC_F64x2Min) \
- V(PPC_F64x2Max) \
- V(PPC_F64x2Ceil) \
- V(PPC_F64x2Floor) \
- V(PPC_F64x2Trunc) \
- V(PPC_F64x2Pmin) \
- V(PPC_F64x2Pmax) \
- V(PPC_F64x2ConvertLowI32x4S) \
- V(PPC_F64x2ConvertLowI32x4U) \
- V(PPC_F64x2PromoteLowF32x4) \
- V(PPC_F32x4Splat) \
- V(PPC_F32x4ExtractLane) \
- V(PPC_F32x4ReplaceLane) \
- V(PPC_F32x4Add) \
- V(PPC_F32x4Sub) \
- V(PPC_F32x4Mul) \
- V(PPC_F32x4Eq) \
- V(PPC_F32x4Ne) \
- V(PPC_F32x4Lt) \
- V(PPC_F32x4Le) \
- V(PPC_F32x4Abs) \
- V(PPC_F32x4Neg) \
- V(PPC_F32x4RecipApprox) \
- V(PPC_F32x4RecipSqrtApprox) \
- V(PPC_F32x4Sqrt) \
- V(PPC_F32x4SConvertI32x4) \
- V(PPC_F32x4UConvertI32x4) \
- V(PPC_F32x4Div) \
- V(PPC_F32x4Min) \
- V(PPC_F32x4Max) \
- V(PPC_F32x4Ceil) \
- V(PPC_F32x4Floor) \
- V(PPC_F32x4Trunc) \
- V(PPC_F32x4Pmin) \
- V(PPC_F32x4Pmax) \
- V(PPC_F32x4Qfma) \
- V(PPC_F32x4Qfms) \
- V(PPC_F32x4DemoteF64x2Zero) \
- V(PPC_I64x2Splat) \
- V(PPC_I64x2ExtractLane) \
- V(PPC_I64x2ReplaceLane) \
- V(PPC_I64x2Add) \
- V(PPC_I64x2Sub) \
- V(PPC_I64x2Mul) \
- V(PPC_I64x2Eq) \
- V(PPC_I64x2Ne) \
- V(PPC_I64x2GtS) \
- V(PPC_I64x2GeS) \
- V(PPC_I64x2Shl) \
- V(PPC_I64x2ShrS) \
- V(PPC_I64x2ShrU) \
- V(PPC_I64x2Neg) \
- V(PPC_I64x2BitMask) \
- V(PPC_I64x2SConvertI32x4Low) \
- V(PPC_I64x2SConvertI32x4High) \
- V(PPC_I64x2UConvertI32x4Low) \
- V(PPC_I64x2UConvertI32x4High) \
- V(PPC_I64x2ExtMulLowI32x4S) \
- V(PPC_I64x2ExtMulHighI32x4S) \
- V(PPC_I64x2ExtMulLowI32x4U) \
- V(PPC_I64x2ExtMulHighI32x4U) \
- V(PPC_I64x2Abs) \
- V(PPC_I32x4Splat) \
- V(PPC_I32x4ExtractLane) \
- V(PPC_I32x4ReplaceLane) \
- V(PPC_I32x4Add) \
- V(PPC_I32x4Sub) \
- V(PPC_I32x4Mul) \
- V(PPC_I32x4MinS) \
- V(PPC_I32x4MinU) \
- V(PPC_I32x4MaxS) \
- V(PPC_I32x4MaxU) \
- V(PPC_I32x4Eq) \
- V(PPC_I32x4Ne) \
- V(PPC_I32x4GtS) \
- V(PPC_I32x4GeS) \
- V(PPC_I32x4GtU) \
- V(PPC_I32x4GeU) \
- V(PPC_I32x4Shl) \
- V(PPC_I32x4ShrS) \
- V(PPC_I32x4ShrU) \
- V(PPC_I32x4Neg) \
- V(PPC_I32x4Abs) \
- V(PPC_I32x4SConvertF32x4) \
- V(PPC_I32x4UConvertF32x4) \
- V(PPC_I32x4SConvertI16x8Low) \
- V(PPC_I32x4SConvertI16x8High) \
- V(PPC_I32x4UConvertI16x8Low) \
- V(PPC_I32x4UConvertI16x8High) \
- V(PPC_I32x4BitMask) \
- V(PPC_I32x4DotI16x8S) \
- V(PPC_I32x4ExtAddPairwiseI16x8S) \
- V(PPC_I32x4ExtAddPairwiseI16x8U) \
- V(PPC_I32x4ExtMulLowI16x8S) \
- V(PPC_I32x4ExtMulHighI16x8S) \
- V(PPC_I32x4ExtMulLowI16x8U) \
- V(PPC_I32x4ExtMulHighI16x8U) \
- V(PPC_I32x4TruncSatF64x2SZero) \
- V(PPC_I32x4TruncSatF64x2UZero) \
- V(PPC_I16x8Splat) \
- V(PPC_I16x8ExtractLaneU) \
- V(PPC_I16x8ExtractLaneS) \
- V(PPC_I16x8ReplaceLane) \
- V(PPC_I16x8Add) \
- V(PPC_I16x8Sub) \
- V(PPC_I16x8Mul) \
- V(PPC_I16x8MinS) \
- V(PPC_I16x8MinU) \
- V(PPC_I16x8MaxS) \
- V(PPC_I16x8MaxU) \
- V(PPC_I16x8Eq) \
- V(PPC_I16x8Ne) \
- V(PPC_I16x8GtS) \
- V(PPC_I16x8GeS) \
- V(PPC_I16x8GtU) \
- V(PPC_I16x8GeU) \
- V(PPC_I16x8Shl) \
- V(PPC_I16x8ShrS) \
- V(PPC_I16x8ShrU) \
- V(PPC_I16x8Neg) \
- V(PPC_I16x8Abs) \
- V(PPC_I16x8SConvertI32x4) \
- V(PPC_I16x8UConvertI32x4) \
- V(PPC_I16x8SConvertI8x16Low) \
- V(PPC_I16x8SConvertI8x16High) \
- V(PPC_I16x8UConvertI8x16Low) \
- V(PPC_I16x8UConvertI8x16High) \
- V(PPC_I16x8AddSatS) \
- V(PPC_I16x8SubSatS) \
- V(PPC_I16x8AddSatU) \
- V(PPC_I16x8SubSatU) \
- V(PPC_I16x8RoundingAverageU) \
- V(PPC_I16x8BitMask) \
- V(PPC_I16x8ExtAddPairwiseI8x16S) \
- V(PPC_I16x8ExtAddPairwiseI8x16U) \
- V(PPC_I16x8Q15MulRSatS) \
- V(PPC_I16x8ExtMulLowI8x16S) \
- V(PPC_I16x8ExtMulHighI8x16S) \
- V(PPC_I16x8ExtMulLowI8x16U) \
- V(PPC_I16x8ExtMulHighI8x16U) \
- V(PPC_I8x16Splat) \
- V(PPC_I8x16ExtractLaneU) \
- V(PPC_I8x16ExtractLaneS) \
- V(PPC_I8x16ReplaceLane) \
- V(PPC_I8x16Add) \
- V(PPC_I8x16Sub) \
- V(PPC_I8x16MinS) \
- V(PPC_I8x16MinU) \
- V(PPC_I8x16MaxS) \
- V(PPC_I8x16MaxU) \
- V(PPC_I8x16Eq) \
- V(PPC_I8x16Ne) \
- V(PPC_I8x16GtS) \
- V(PPC_I8x16GeS) \
- V(PPC_I8x16GtU) \
- V(PPC_I8x16GeU) \
- V(PPC_I8x16Shl) \
- V(PPC_I8x16ShrS) \
- V(PPC_I8x16ShrU) \
- V(PPC_I8x16Neg) \
- V(PPC_I8x16Abs) \
- V(PPC_I8x16SConvertI16x8) \
- V(PPC_I8x16UConvertI16x8) \
- V(PPC_I8x16AddSatS) \
- V(PPC_I8x16SubSatS) \
- V(PPC_I8x16AddSatU) \
- V(PPC_I8x16SubSatU) \
- V(PPC_I8x16RoundingAverageU) \
- V(PPC_I8x16Shuffle) \
- V(PPC_I8x16Swizzle) \
- V(PPC_I8x16BitMask) \
- V(PPC_I8x16Popcnt) \
- V(PPC_I64x2AllTrue) \
- V(PPC_I32x4AllTrue) \
- V(PPC_I16x8AllTrue) \
- V(PPC_I8x16AllTrue) \
- V(PPC_V128AnyTrue) \
- V(PPC_S128And) \
- V(PPC_S128Or) \
- V(PPC_S128Xor) \
- V(PPC_S128Const) \
- V(PPC_S128Zero) \
- V(PPC_S128AllOnes) \
- V(PPC_S128Not) \
- V(PPC_S128Select) \
- V(PPC_S128AndNot) \
- V(PPC_S128Load8Splat) \
- V(PPC_S128Load16Splat) \
- V(PPC_S128Load32Splat) \
- V(PPC_S128Load64Splat) \
- V(PPC_S128Load8x8S) \
- V(PPC_S128Load8x8U) \
- V(PPC_S128Load16x4S) \
- V(PPC_S128Load16x4U) \
- V(PPC_S128Load32x2S) \
- V(PPC_S128Load32x2U) \
- V(PPC_S128Load32Zero) \
- V(PPC_S128Load64Zero) \
- V(PPC_S128Load8Lane) \
- V(PPC_S128Load16Lane) \
- V(PPC_S128Load32Lane) \
- V(PPC_S128Load64Lane) \
- V(PPC_S128Store8Lane) \
- V(PPC_S128Store16Lane) \
- V(PPC_S128Store32Lane) \
- V(PPC_S128Store64Lane) \
- V(PPC_StoreCompressTagged) \
- V(PPC_LoadDecompressTaggedSigned) \
- V(PPC_LoadDecompressTaggedPointer) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(PPC_Peek) \
+ V(PPC_Sync) \
+ V(PPC_And) \
+ V(PPC_AndComplement) \
+ V(PPC_Or) \
+ V(PPC_OrComplement) \
+ V(PPC_Xor) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftLeftPair) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightPair) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_ShiftRightAlgPair) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add32) \
+ V(PPC_Add64) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_AddPair) \
+ V(PPC_AddDouble) \
+ V(PPC_Sub) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_SubPair) \
+ V(PPC_SubDouble) \
+ V(PPC_Mul32) \
+ V(PPC_Mul32WithHigh32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulPair) \
+ V(PPC_MulDouble) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivDouble) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModDouble) \
+ V(PPC_Neg) \
+ V(PPC_NegDouble) \
+ V(PPC_SqrtDouble) \
+ V(PPC_FloorDouble) \
+ V(PPC_CeilDouble) \
+ V(PPC_TruncateDouble) \
+ V(PPC_RoundDouble) \
+ V(PPC_MaxDouble) \
+ V(PPC_MinDouble) \
+ V(PPC_AbsDouble) \
+ V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpDouble) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_PushFrame) \
+ V(PPC_StoreToStackSlot) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
+ V(PPC_Int32ToFloat32) \
+ V(PPC_Int32ToDouble) \
+ V(PPC_Uint32ToFloat32) \
+ V(PPC_Float32ToInt32) \
+ V(PPC_Float32ToUint32) \
+ V(PPC_Uint32ToDouble) \
+ V(PPC_Float32ToDouble) \
+ V(PPC_Float64SilenceNaN) \
+ V(PPC_DoubleToInt32) \
+ V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
+ V(PPC_DoubleToFloat32) \
+ V(PPC_DoubleExtractLowWord32) \
+ V(PPC_DoubleExtractHighWord32) \
+ V(PPC_DoubleInsertLowWord32) \
+ V(PPC_DoubleInsertHighWord32) \
+ V(PPC_DoubleConstruct) \
+ V(PPC_BitcastInt32ToFloat32) \
+ V(PPC_BitcastFloat32ToInt32) \
+ V(PPC_BitcastInt64ToDouble) \
+ V(PPC_BitcastDoubleToInt64) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWordU32) \
+ V(PPC_LoadByteRev32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadByteRev64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadDouble) \
+ V(PPC_LoadSimd128) \
+ V(PPC_LoadReverseSimd128RR) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreByteRev32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreByteRev64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreDouble) \
+ V(PPC_StoreSimd128) \
+ V(PPC_ByteRev32) \
+ V(PPC_ByteRev64) \
+ V(PPC_AtomicExchangeUint8) \
+ V(PPC_AtomicExchangeUint16) \
+ V(PPC_AtomicExchangeWord32) \
+ V(PPC_AtomicExchangeWord64) \
+ V(PPC_AtomicCompareExchangeUint8) \
+ V(PPC_AtomicCompareExchangeUint16) \
+ V(PPC_AtomicCompareExchangeWord32) \
+ V(PPC_AtomicCompareExchangeWord64) \
+ V(PPC_AtomicAddUint8) \
+ V(PPC_AtomicAddUint16) \
+ V(PPC_AtomicAddUint32) \
+ V(PPC_AtomicAddUint64) \
+ V(PPC_AtomicAddInt8) \
+ V(PPC_AtomicAddInt16) \
+ V(PPC_AtomicAddInt32) \
+ V(PPC_AtomicAddInt64) \
+ V(PPC_AtomicSubUint8) \
+ V(PPC_AtomicSubUint16) \
+ V(PPC_AtomicSubUint32) \
+ V(PPC_AtomicSubUint64) \
+ V(PPC_AtomicSubInt8) \
+ V(PPC_AtomicSubInt16) \
+ V(PPC_AtomicSubInt32) \
+ V(PPC_AtomicSubInt64) \
+ V(PPC_AtomicAndUint8) \
+ V(PPC_AtomicAndUint16) \
+ V(PPC_AtomicAndUint32) \
+ V(PPC_AtomicAndUint64) \
+ V(PPC_AtomicAndInt8) \
+ V(PPC_AtomicAndInt16) \
+ V(PPC_AtomicAndInt32) \
+ V(PPC_AtomicAndInt64) \
+ V(PPC_AtomicOrUint8) \
+ V(PPC_AtomicOrUint16) \
+ V(PPC_AtomicOrUint32) \
+ V(PPC_AtomicOrUint64) \
+ V(PPC_AtomicOrInt8) \
+ V(PPC_AtomicOrInt16) \
+ V(PPC_AtomicOrInt32) \
+ V(PPC_AtomicOrInt64) \
+ V(PPC_AtomicXorUint8) \
+ V(PPC_AtomicXorUint16) \
+ V(PPC_AtomicXorUint32) \
+ V(PPC_AtomicXorUint64) \
+ V(PPC_AtomicXorInt8) \
+ V(PPC_AtomicXorInt16) \
+ V(PPC_AtomicXorInt32) \
+ V(PPC_AtomicXorInt64) \
+ V(PPC_F64x2Splat) \
+ V(PPC_F64x2ExtractLane) \
+ V(PPC_F64x2ReplaceLane) \
+ V(PPC_F64x2Add) \
+ V(PPC_F64x2Sub) \
+ V(PPC_F64x2Mul) \
+ V(PPC_F64x2Eq) \
+ V(PPC_F64x2Ne) \
+ V(PPC_F64x2Le) \
+ V(PPC_F64x2Lt) \
+ V(PPC_F64x2Abs) \
+ V(PPC_F64x2Neg) \
+ V(PPC_F64x2Sqrt) \
+ V(PPC_F64x2Qfma) \
+ V(PPC_F64x2Qfms) \
+ V(PPC_F64x2Div) \
+ V(PPC_F64x2Min) \
+ V(PPC_F64x2Max) \
+ V(PPC_F64x2Ceil) \
+ V(PPC_F64x2Floor) \
+ V(PPC_F64x2Trunc) \
+ V(PPC_F64x2Pmin) \
+ V(PPC_F64x2Pmax) \
+ V(PPC_F64x2ConvertLowI32x4S) \
+ V(PPC_F64x2ConvertLowI32x4U) \
+ V(PPC_F64x2PromoteLowF32x4) \
+ V(PPC_F32x4Splat) \
+ V(PPC_F32x4ExtractLane) \
+ V(PPC_F32x4ReplaceLane) \
+ V(PPC_F32x4Add) \
+ V(PPC_F32x4Sub) \
+ V(PPC_F32x4Mul) \
+ V(PPC_F32x4Eq) \
+ V(PPC_F32x4Ne) \
+ V(PPC_F32x4Lt) \
+ V(PPC_F32x4Le) \
+ V(PPC_F32x4Abs) \
+ V(PPC_F32x4Neg) \
+ V(PPC_F32x4RecipApprox) \
+ V(PPC_F32x4RecipSqrtApprox) \
+ V(PPC_F32x4Sqrt) \
+ V(PPC_F32x4SConvertI32x4) \
+ V(PPC_F32x4UConvertI32x4) \
+ V(PPC_F32x4Div) \
+ V(PPC_F32x4Min) \
+ V(PPC_F32x4Max) \
+ V(PPC_F32x4Ceil) \
+ V(PPC_F32x4Floor) \
+ V(PPC_F32x4Trunc) \
+ V(PPC_F32x4Pmin) \
+ V(PPC_F32x4Pmax) \
+ V(PPC_F32x4Qfma) \
+ V(PPC_F32x4Qfms) \
+ V(PPC_F32x4DemoteF64x2Zero) \
+ V(PPC_I64x2Splat) \
+ V(PPC_I64x2ExtractLane) \
+ V(PPC_I64x2ReplaceLane) \
+ V(PPC_I64x2Add) \
+ V(PPC_I64x2Sub) \
+ V(PPC_I64x2Mul) \
+ V(PPC_I64x2Eq) \
+ V(PPC_I64x2Ne) \
+ V(PPC_I64x2GtS) \
+ V(PPC_I64x2GeS) \
+ V(PPC_I64x2Shl) \
+ V(PPC_I64x2ShrS) \
+ V(PPC_I64x2ShrU) \
+ V(PPC_I64x2Neg) \
+ V(PPC_I64x2BitMask) \
+ V(PPC_I64x2SConvertI32x4Low) \
+ V(PPC_I64x2SConvertI32x4High) \
+ V(PPC_I64x2UConvertI32x4Low) \
+ V(PPC_I64x2UConvertI32x4High) \
+ V(PPC_I64x2ExtMulLowI32x4S) \
+ V(PPC_I64x2ExtMulHighI32x4S) \
+ V(PPC_I64x2ExtMulLowI32x4U) \
+ V(PPC_I64x2ExtMulHighI32x4U) \
+ V(PPC_I64x2Abs) \
+ V(PPC_I32x4Splat) \
+ V(PPC_I32x4ExtractLane) \
+ V(PPC_I32x4ReplaceLane) \
+ V(PPC_I32x4Add) \
+ V(PPC_I32x4Sub) \
+ V(PPC_I32x4Mul) \
+ V(PPC_I32x4MinS) \
+ V(PPC_I32x4MinU) \
+ V(PPC_I32x4MaxS) \
+ V(PPC_I32x4MaxU) \
+ V(PPC_I32x4Eq) \
+ V(PPC_I32x4Ne) \
+ V(PPC_I32x4GtS) \
+ V(PPC_I32x4GeS) \
+ V(PPC_I32x4GtU) \
+ V(PPC_I32x4GeU) \
+ V(PPC_I32x4Shl) \
+ V(PPC_I32x4ShrS) \
+ V(PPC_I32x4ShrU) \
+ V(PPC_I32x4Neg) \
+ V(PPC_I32x4Abs) \
+ V(PPC_I32x4SConvertF32x4) \
+ V(PPC_I32x4UConvertF32x4) \
+ V(PPC_I32x4SConvertI16x8Low) \
+ V(PPC_I32x4SConvertI16x8High) \
+ V(PPC_I32x4UConvertI16x8Low) \
+ V(PPC_I32x4UConvertI16x8High) \
+ V(PPC_I32x4BitMask) \
+ V(PPC_I32x4DotI16x8S) \
+ V(PPC_I32x4ExtAddPairwiseI16x8S) \
+ V(PPC_I32x4ExtAddPairwiseI16x8U) \
+ V(PPC_I32x4ExtMulLowI16x8S) \
+ V(PPC_I32x4ExtMulHighI16x8S) \
+ V(PPC_I32x4ExtMulLowI16x8U) \
+ V(PPC_I32x4ExtMulHighI16x8U) \
+ V(PPC_I32x4TruncSatF64x2SZero) \
+ V(PPC_I32x4TruncSatF64x2UZero) \
+ V(PPC_I16x8Splat) \
+ V(PPC_I16x8ExtractLaneU) \
+ V(PPC_I16x8ExtractLaneS) \
+ V(PPC_I16x8ReplaceLane) \
+ V(PPC_I16x8Add) \
+ V(PPC_I16x8Sub) \
+ V(PPC_I16x8Mul) \
+ V(PPC_I16x8MinS) \
+ V(PPC_I16x8MinU) \
+ V(PPC_I16x8MaxS) \
+ V(PPC_I16x8MaxU) \
+ V(PPC_I16x8Eq) \
+ V(PPC_I16x8Ne) \
+ V(PPC_I16x8GtS) \
+ V(PPC_I16x8GeS) \
+ V(PPC_I16x8GtU) \
+ V(PPC_I16x8GeU) \
+ V(PPC_I16x8Shl) \
+ V(PPC_I16x8ShrS) \
+ V(PPC_I16x8ShrU) \
+ V(PPC_I16x8Neg) \
+ V(PPC_I16x8Abs) \
+ V(PPC_I16x8SConvertI32x4) \
+ V(PPC_I16x8UConvertI32x4) \
+ V(PPC_I16x8SConvertI8x16Low) \
+ V(PPC_I16x8SConvertI8x16High) \
+ V(PPC_I16x8UConvertI8x16Low) \
+ V(PPC_I16x8UConvertI8x16High) \
+ V(PPC_I16x8AddSatS) \
+ V(PPC_I16x8SubSatS) \
+ V(PPC_I16x8AddSatU) \
+ V(PPC_I16x8SubSatU) \
+ V(PPC_I16x8RoundingAverageU) \
+ V(PPC_I16x8BitMask) \
+ V(PPC_I16x8ExtAddPairwiseI8x16S) \
+ V(PPC_I16x8ExtAddPairwiseI8x16U) \
+ V(PPC_I16x8Q15MulRSatS) \
+ V(PPC_I16x8ExtMulLowI8x16S) \
+ V(PPC_I16x8ExtMulHighI8x16S) \
+ V(PPC_I16x8ExtMulLowI8x16U) \
+ V(PPC_I16x8ExtMulHighI8x16U) \
+ V(PPC_I8x16Splat) \
+ V(PPC_I8x16ExtractLaneU) \
+ V(PPC_I8x16ExtractLaneS) \
+ V(PPC_I8x16ReplaceLane) \
+ V(PPC_I8x16Add) \
+ V(PPC_I8x16Sub) \
+ V(PPC_I8x16MinS) \
+ V(PPC_I8x16MinU) \
+ V(PPC_I8x16MaxS) \
+ V(PPC_I8x16MaxU) \
+ V(PPC_I8x16Eq) \
+ V(PPC_I8x16Ne) \
+ V(PPC_I8x16GtS) \
+ V(PPC_I8x16GeS) \
+ V(PPC_I8x16GtU) \
+ V(PPC_I8x16GeU) \
+ V(PPC_I8x16Shl) \
+ V(PPC_I8x16ShrS) \
+ V(PPC_I8x16ShrU) \
+ V(PPC_I8x16Neg) \
+ V(PPC_I8x16Abs) \
+ V(PPC_I8x16SConvertI16x8) \
+ V(PPC_I8x16UConvertI16x8) \
+ V(PPC_I8x16AddSatS) \
+ V(PPC_I8x16SubSatS) \
+ V(PPC_I8x16AddSatU) \
+ V(PPC_I8x16SubSatU) \
+ V(PPC_I8x16RoundingAverageU) \
+ V(PPC_I8x16Shuffle) \
+ V(PPC_I8x16Swizzle) \
+ V(PPC_I8x16BitMask) \
+ V(PPC_I8x16Popcnt) \
+ V(PPC_I64x2AllTrue) \
+ V(PPC_I32x4AllTrue) \
+ V(PPC_I16x8AllTrue) \
+ V(PPC_I8x16AllTrue) \
+ V(PPC_V128AnyTrue) \
+ V(PPC_S128And) \
+ V(PPC_S128Or) \
+ V(PPC_S128Xor) \
+ V(PPC_S128Const) \
+ V(PPC_S128Zero) \
+ V(PPC_S128AllOnes) \
+ V(PPC_S128Not) \
+ V(PPC_S128Select) \
+ V(PPC_S128AndNot) \
+ V(PPC_S128Load8Splat) \
+ V(PPC_S128Load16Splat) \
+ V(PPC_S128Load32Splat) \
+ V(PPC_S128Load64Splat) \
+ V(PPC_S128Load8x8S) \
+ V(PPC_S128Load8x8U) \
+ V(PPC_S128Load16x4S) \
+ V(PPC_S128Load16x4U) \
+ V(PPC_S128Load32x2S) \
+ V(PPC_S128Load32x2U) \
+ V(PPC_S128Load32Zero) \
+ V(PPC_S128Load64Zero) \
+ V(PPC_S128Load8Lane) \
+ V(PPC_S128Load16Lane) \
+ V(PPC_S128Load32Lane) \
+ V(PPC_S128Load64Lane) \
+ V(PPC_S128Store8Lane) \
+ V(PPC_S128Store16Lane) \
+ V(PPC_S128Store32Lane) \
+ V(PPC_S128Store64Lane) \
+ V(PPC_StoreCompressTagged) \
+ V(PPC_LoadDecompressTaggedSigned) \
+ V(PPC_LoadDecompressTaggedPointer) \
V(PPC_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index bfa7c0a6e0..28f071ec68 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -162,9 +162,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
PPCOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
static void VisitLoadCommon(InstructionSelector* selector, Node* node,
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 559378b19b..c95299ee1d 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -441,8 +441,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), 0, size, \
+ sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
@@ -743,13 +743,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -2049,6 +2049,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
+ case kRiscvF32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ fmv_x_w(kScratchReg, i.InputSingleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kRiscvF64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ fmv_x_d(kScratchReg, i.InputDoubleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
case kRiscvI32x4Abs: {
__ VU.set(kScratchReg, E32, m1);
__ vmv_vx(kSimd128RegZero, zero_reg);
@@ -2144,12 +2156,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvI32x4GtS: {
- __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), E32, m1);
break;
}
case kRiscvI64x2GtS: {
- __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), E64, m1);
break;
}
@@ -2392,6 +2404,171 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vor_vv(dst, dst, kSimd128ScratchReg);
break;
}
+ case kRiscvF32x4Abs: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Abs: {
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Neg: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Neg: {
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4DemoteF64x2Zero: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfncvt_f_f_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmv_vi(v0, 12);
+ __ vmerge_vx(i.OutputSimd128Register(), zero_reg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Add: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF32x4Sub: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Add: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Sub: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF32x4Ceil: {
+ __ Ceil_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Ceil: {
+ __ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Floor: {
+ __ Floor_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Floor: {
+ __ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvS128Select: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
+ __ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
+ kSimd128ScratchReg2);
+ __ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
+ kSimd128ScratchReg2);
+ break;
+ }
+ case kRiscvF32x4UConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfcvt_f_xu_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4SConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfcvt_f_x_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Div: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Mul: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Eq: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfeq_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Ne: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfne_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Lt: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Le: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfle_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Max: {
+ __ VU.set(kScratchReg, E32, m1);
+ const int32_t kNaN = 0x7FC00000;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmax_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Min: {
+ __ VU.set(kScratchReg, E32, m1);
+ const int32_t kNaN = 0x7FC00000;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmin_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
default:
#ifdef DEBUG
switch (arch_opcode) {
@@ -3061,7 +3238,18 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(
- ZoneDeque<DeoptimizationExit*>* exits) {}
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ __ ForceConstantPoolEmissionWithoutJump();
+ int total_size = 0;
+ for (DeoptimizationExit* exit : deoptimization_exits_) {
+ total_size += (exit->kind() == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize;
+ }
+
+ __ CheckTrampolinePoolQuick(total_size);
+ DCHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
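
The new kRiscvF32x4Min and kRiscvF32x4Max sequences above implement the Wasm SIMD NaN rule with a mask: v0 marks the lanes where both inputs are ordered, vfmin/vfmax runs only under that mask, and the remaining lanes keep the canonical NaN 0x7FC00000 splatted into the scratch register. A scalar sketch of the per-lane semantics being targeted, assuming the Wasm rule that any NaN input yields the canonical NaN and spelling out the zero-sign ordering rather than relying on std::fmin:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Per-lane semantics the masked RVV sequence for kRiscvF32x4Min targets.
    float WasmSimdLaneMin(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) {
        const uint32_t kCanonicalNaN = 0x7FC00000;  // same constant as above
        float nan;
        std::memcpy(&nan, &kCanonicalNaN, sizeof(nan));
        return nan;
      }
      if (a == 0.0f && b == 0.0f) {
        return std::signbit(a) ? a : b;  // -0.0 orders below +0.0
      }
      return a < b ? a : b;
    }

    int main() {
      assert(std::isnan(WasmSimdLaneMin(1.0f, std::nanf(""))));
      assert(std::signbit(WasmSimdLaneMin(0.0f, -0.0f)));
      assert(WasmSimdLaneMin(2.0f, 3.0f) == 2.0f);
      return 0;
    }
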
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 0c8d99a8e8..f3aa0f29a8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -9,396 +9,400 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
// RISC-V-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(RiscvAdd32) \
- V(RiscvAdd64) \
- V(RiscvAddOvf64) \
- V(RiscvSub32) \
- V(RiscvSub64) \
- V(RiscvSubOvf64) \
- V(RiscvMul32) \
- V(RiscvMulOvf32) \
- V(RiscvMulHigh32) \
- V(RiscvMulHigh64) \
- V(RiscvMulHighU32) \
- V(RiscvMul64) \
- V(RiscvDiv32) \
- V(RiscvDiv64) \
- V(RiscvDivU32) \
- V(RiscvDivU64) \
- V(RiscvMod32) \
- V(RiscvMod64) \
- V(RiscvModU32) \
- V(RiscvModU64) \
- V(RiscvAnd) \
- V(RiscvAnd32) \
- V(RiscvOr) \
- V(RiscvOr32) \
- V(RiscvNor) \
- V(RiscvNor32) \
- V(RiscvXor) \
- V(RiscvXor32) \
- V(RiscvClz32) \
- V(RiscvShl32) \
- V(RiscvShr32) \
- V(RiscvSar32) \
- V(RiscvZeroExtendWord) \
- V(RiscvSignExtendWord) \
- V(RiscvClz64) \
- V(RiscvCtz32) \
- V(RiscvCtz64) \
- V(RiscvPopcnt32) \
- V(RiscvPopcnt64) \
- V(RiscvShl64) \
- V(RiscvShr64) \
- V(RiscvSar64) \
- V(RiscvRor32) \
- V(RiscvRor64) \
- V(RiscvMov) \
- V(RiscvTst) \
- V(RiscvCmp) \
- V(RiscvCmpZero) \
- V(RiscvCmpS) \
- V(RiscvAddS) \
- V(RiscvSubS) \
- V(RiscvMulS) \
- V(RiscvDivS) \
- V(RiscvModS) \
- V(RiscvAbsS) \
- V(RiscvNegS) \
- V(RiscvSqrtS) \
- V(RiscvMaxS) \
- V(RiscvMinS) \
- V(RiscvCmpD) \
- V(RiscvAddD) \
- V(RiscvSubD) \
- V(RiscvMulD) \
- V(RiscvDivD) \
- V(RiscvModD) \
- V(RiscvAbsD) \
- V(RiscvNegD) \
- V(RiscvSqrtD) \
- V(RiscvMaxD) \
- V(RiscvMinD) \
- V(RiscvFloat64RoundDown) \
- V(RiscvFloat64RoundTruncate) \
- V(RiscvFloat64RoundUp) \
- V(RiscvFloat64RoundTiesEven) \
- V(RiscvFloat32RoundDown) \
- V(RiscvFloat32RoundTruncate) \
- V(RiscvFloat32RoundUp) \
- V(RiscvFloat32RoundTiesEven) \
- V(RiscvCvtSD) \
- V(RiscvCvtDS) \
- V(RiscvTruncWD) \
- V(RiscvRoundWD) \
- V(RiscvFloorWD) \
- V(RiscvCeilWD) \
- V(RiscvTruncWS) \
- V(RiscvRoundWS) \
- V(RiscvFloorWS) \
- V(RiscvCeilWS) \
- V(RiscvTruncLS) \
- V(RiscvTruncLD) \
- V(RiscvTruncUwD) \
- V(RiscvTruncUwS) \
- V(RiscvTruncUlS) \
- V(RiscvTruncUlD) \
- V(RiscvCvtDW) \
- V(RiscvCvtSL) \
- V(RiscvCvtSW) \
- V(RiscvCvtSUw) \
- V(RiscvCvtSUl) \
- V(RiscvCvtDL) \
- V(RiscvCvtDUw) \
- V(RiscvCvtDUl) \
- V(RiscvLb) \
- V(RiscvLbu) \
- V(RiscvSb) \
- V(RiscvLh) \
- V(RiscvUlh) \
- V(RiscvLhu) \
- V(RiscvUlhu) \
- V(RiscvSh) \
- V(RiscvUsh) \
- V(RiscvLd) \
- V(RiscvUld) \
- V(RiscvLw) \
- V(RiscvUlw) \
- V(RiscvLwu) \
- V(RiscvUlwu) \
- V(RiscvSw) \
- V(RiscvUsw) \
- V(RiscvSd) \
- V(RiscvUsd) \
- V(RiscvLoadFloat) \
- V(RiscvULoadFloat) \
- V(RiscvStoreFloat) \
- V(RiscvUStoreFloat) \
- V(RiscvLoadDouble) \
- V(RiscvULoadDouble) \
- V(RiscvStoreDouble) \
- V(RiscvUStoreDouble) \
- V(RiscvBitcastDL) \
- V(RiscvBitcastLD) \
- V(RiscvBitcastInt32ToFloat32) \
- V(RiscvBitcastFloat32ToInt32) \
- V(RiscvFloat64ExtractLowWord32) \
- V(RiscvFloat64ExtractHighWord32) \
- V(RiscvFloat64InsertLowWord32) \
- V(RiscvFloat64InsertHighWord32) \
- V(RiscvFloat32Max) \
- V(RiscvFloat64Max) \
- V(RiscvFloat32Min) \
- V(RiscvFloat64Min) \
- V(RiscvFloat64SilenceNaN) \
- V(RiscvPush) \
- V(RiscvPeek) \
- V(RiscvByteSwap64) \
- V(RiscvByteSwap32) \
- V(RiscvStoreToStackSlot) \
- V(RiscvStackClaim) \
- V(RiscvSignExtendByte) \
- V(RiscvSignExtendShort) \
- V(RiscvSync) \
- V(RiscvAssertEqual) \
- V(RiscvS128Const) \
- V(RiscvS128Zero) \
- V(RiscvS128AllOnes) \
- V(RiscvI32x4Splat) \
- V(RiscvI32x4ExtractLane) \
- V(RiscvI32x4ReplaceLane) \
- V(RiscvI32x4Add) \
- V(RiscvI32x4Sub) \
- V(RiscvF64x2Abs) \
- V(RiscvF64x2Neg) \
- V(RiscvF32x4Splat) \
- V(RiscvF32x4ExtractLane) \
- V(RiscvF32x4ReplaceLane) \
- V(RiscvF32x4SConvertI32x4) \
- V(RiscvF32x4UConvertI32x4) \
- V(RiscvI64x2SConvertI32x4Low) \
- V(RiscvI64x2SConvertI32x4High) \
- V(RiscvI64x2UConvertI32x4Low) \
- V(RiscvI64x2UConvertI32x4High) \
- V(RiscvI32x4Mul) \
- V(RiscvI32x4MaxS) \
- V(RiscvI32x4MinS) \
- V(RiscvI32x4Eq) \
- V(RiscvI32x4Ne) \
- V(RiscvI32x4Shl) \
- V(RiscvI32x4ShrS) \
- V(RiscvI32x4ShrU) \
- V(RiscvI32x4MaxU) \
- V(RiscvI32x4MinU) \
- V(RiscvI64x2GtS) \
- V(RiscvI64x2GeS) \
- V(RiscvI64x2Eq) \
- V(RiscvI64x2Ne) \
- V(RiscvF64x2Sqrt) \
- V(RiscvF64x2Add) \
- V(RiscvF64x2Sub) \
- V(RiscvF64x2Mul) \
- V(RiscvF64x2Div) \
- V(RiscvF64x2Min) \
- V(RiscvF64x2Max) \
- V(RiscvF64x2ConvertLowI32x4S) \
- V(RiscvF64x2ConvertLowI32x4U) \
- V(RiscvF64x2PromoteLowF32x4) \
- V(RiscvF64x2Eq) \
- V(RiscvF64x2Ne) \
- V(RiscvF64x2Lt) \
- V(RiscvF64x2Le) \
- V(RiscvF64x2Splat) \
- V(RiscvF64x2ExtractLane) \
- V(RiscvF64x2ReplaceLane) \
- V(RiscvF64x2Pmin) \
- V(RiscvF64x2Pmax) \
- V(RiscvF64x2Ceil) \
- V(RiscvF64x2Floor) \
- V(RiscvF64x2Trunc) \
- V(RiscvF64x2NearestInt) \
- V(RiscvI64x2Splat) \
- V(RiscvI64x2ExtractLane) \
- V(RiscvI64x2ReplaceLane) \
- V(RiscvI64x2Add) \
- V(RiscvI64x2Sub) \
- V(RiscvI64x2Mul) \
- V(RiscvI64x2Abs) \
- V(RiscvI64x2Neg) \
- V(RiscvI64x2Shl) \
- V(RiscvI64x2ShrS) \
- V(RiscvI64x2ShrU) \
- V(RiscvI64x2BitMask) \
- V(RiscvF32x4Abs) \
- V(RiscvF32x4Neg) \
- V(RiscvF32x4Sqrt) \
- V(RiscvF32x4RecipApprox) \
- V(RiscvF32x4RecipSqrtApprox) \
- V(RiscvF32x4Add) \
- V(RiscvF32x4Sub) \
- V(RiscvF32x4Mul) \
- V(RiscvF32x4Div) \
- V(RiscvF32x4Max) \
- V(RiscvF32x4Min) \
- V(RiscvF32x4Eq) \
- V(RiscvF32x4Ne) \
- V(RiscvF32x4Lt) \
- V(RiscvF32x4Le) \
- V(RiscvF32x4Pmin) \
- V(RiscvF32x4Pmax) \
- V(RiscvF32x4DemoteF64x2Zero) \
- V(RiscvF32x4Ceil) \
- V(RiscvF32x4Floor) \
- V(RiscvF32x4Trunc) \
- V(RiscvF32x4NearestInt) \
- V(RiscvI32x4SConvertF32x4) \
- V(RiscvI32x4UConvertF32x4) \
- V(RiscvI32x4Neg) \
- V(RiscvI32x4GtS) \
- V(RiscvI32x4GeS) \
- V(RiscvI32x4GtU) \
- V(RiscvI32x4GeU) \
- V(RiscvI32x4Abs) \
- V(RiscvI32x4BitMask) \
- V(RiscvI32x4DotI16x8S) \
- V(RiscvI32x4TruncSatF64x2SZero) \
- V(RiscvI32x4TruncSatF64x2UZero) \
- V(RiscvI16x8Splat) \
- V(RiscvI16x8ExtractLaneU) \
- V(RiscvI16x8ExtractLaneS) \
- V(RiscvI16x8ReplaceLane) \
- V(RiscvI16x8Neg) \
- V(RiscvI16x8Shl) \
- V(RiscvI16x8ShrS) \
- V(RiscvI16x8ShrU) \
- V(RiscvI16x8Add) \
- V(RiscvI16x8AddSatS) \
- V(RiscvI16x8Sub) \
- V(RiscvI16x8SubSatS) \
- V(RiscvI16x8Mul) \
- V(RiscvI16x8MaxS) \
- V(RiscvI16x8MinS) \
- V(RiscvI16x8Eq) \
- V(RiscvI16x8Ne) \
- V(RiscvI16x8GtS) \
- V(RiscvI16x8GeS) \
- V(RiscvI16x8AddSatU) \
- V(RiscvI16x8SubSatU) \
- V(RiscvI16x8MaxU) \
- V(RiscvI16x8MinU) \
- V(RiscvI16x8GtU) \
- V(RiscvI16x8GeU) \
- V(RiscvI16x8RoundingAverageU) \
- V(RiscvI16x8Q15MulRSatS) \
- V(RiscvI16x8Abs) \
- V(RiscvI16x8BitMask) \
- V(RiscvI8x16Splat) \
- V(RiscvI8x16ExtractLaneU) \
- V(RiscvI8x16ExtractLaneS) \
- V(RiscvI8x16ReplaceLane) \
- V(RiscvI8x16Neg) \
- V(RiscvI8x16Shl) \
- V(RiscvI8x16ShrS) \
- V(RiscvI8x16Add) \
- V(RiscvI8x16AddSatS) \
- V(RiscvI8x16Sub) \
- V(RiscvI8x16SubSatS) \
- V(RiscvI8x16MaxS) \
- V(RiscvI8x16MinS) \
- V(RiscvI8x16Eq) \
- V(RiscvI8x16Ne) \
- V(RiscvI8x16GtS) \
- V(RiscvI8x16GeS) \
- V(RiscvI8x16ShrU) \
- V(RiscvI8x16AddSatU) \
- V(RiscvI8x16SubSatU) \
- V(RiscvI8x16MaxU) \
- V(RiscvI8x16MinU) \
- V(RiscvI8x16GtU) \
- V(RiscvI8x16GeU) \
- V(RiscvI8x16RoundingAverageU) \
- V(RiscvI8x16Abs) \
- V(RiscvI8x16BitMask) \
- V(RiscvI8x16Popcnt) \
- V(RiscvS128And) \
- V(RiscvS128Or) \
- V(RiscvS128Xor) \
- V(RiscvS128Not) \
- V(RiscvS128Select) \
- V(RiscvS128AndNot) \
- V(RiscvI32x4AllTrue) \
- V(RiscvI16x8AllTrue) \
- V(RiscvV128AnyTrue) \
- V(RiscvI8x16AllTrue) \
- V(RiscvI64x2AllTrue) \
- V(RiscvS32x4InterleaveRight) \
- V(RiscvS32x4InterleaveLeft) \
- V(RiscvS32x4PackEven) \
- V(RiscvS32x4PackOdd) \
- V(RiscvS32x4InterleaveEven) \
- V(RiscvS32x4InterleaveOdd) \
- V(RiscvS32x4Shuffle) \
- V(RiscvS16x8InterleaveRight) \
- V(RiscvS16x8InterleaveLeft) \
- V(RiscvS16x8PackEven) \
- V(RiscvS16x8PackOdd) \
- V(RiscvS16x8InterleaveEven) \
- V(RiscvS16x8InterleaveOdd) \
- V(RiscvS16x4Reverse) \
- V(RiscvS16x2Reverse) \
- V(RiscvS8x16InterleaveRight) \
- V(RiscvS8x16InterleaveLeft) \
- V(RiscvS8x16PackEven) \
- V(RiscvS8x16PackOdd) \
- V(RiscvS8x16InterleaveEven) \
- V(RiscvS8x16InterleaveOdd) \
- V(RiscvI8x16Shuffle) \
- V(RiscvI8x16Swizzle) \
- V(RiscvS8x16Concat) \
- V(RiscvS8x8Reverse) \
- V(RiscvS8x4Reverse) \
- V(RiscvS8x2Reverse) \
- V(RiscvS128Load8Splat) \
- V(RiscvS128Load16Splat) \
- V(RiscvS128Load32Splat) \
- V(RiscvS128Load64Splat) \
- V(RiscvS128Load8x8S) \
- V(RiscvS128Load8x8U) \
- V(RiscvS128Load16x4S) \
- V(RiscvS128Load16x4U) \
- V(RiscvS128Load32x2S) \
- V(RiscvS128Load32x2U) \
- V(RiscvS128LoadLane) \
- V(RiscvS128StoreLane) \
- V(RiscvRvvLd) \
- V(RiscvRvvSt) \
- V(RiscvI32x4SConvertI16x8Low) \
- V(RiscvI32x4SConvertI16x8High) \
- V(RiscvI32x4UConvertI16x8Low) \
- V(RiscvI32x4UConvertI16x8High) \
- V(RiscvI16x8SConvertI8x16Low) \
- V(RiscvI16x8SConvertI8x16High) \
- V(RiscvI16x8SConvertI32x4) \
- V(RiscvI16x8UConvertI32x4) \
- V(RiscvI16x8UConvertI8x16Low) \
- V(RiscvI16x8UConvertI8x16High) \
- V(RiscvI8x16SConvertI16x8) \
- V(RiscvI8x16UConvertI16x8) \
- V(RiscvWord64AtomicLoadUint64) \
- V(RiscvWord64AtomicStoreWord64) \
- V(RiscvWord64AtomicAddUint64) \
- V(RiscvWord64AtomicSubUint64) \
- V(RiscvWord64AtomicAndUint64) \
- V(RiscvWord64AtomicOrUint64) \
- V(RiscvWord64AtomicXorUint64) \
- V(RiscvWord64AtomicExchangeUint64) \
- V(RiscvWord64AtomicCompareExchangeUint64) \
- V(RiscvStoreCompressTagged) \
- V(RiscvLoadDecompressTaggedSigned) \
- V(RiscvLoadDecompressTaggedPointer) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(RiscvAdd32) \
+ V(RiscvAdd64) \
+ V(RiscvAddOvf64) \
+ V(RiscvSub32) \
+ V(RiscvSub64) \
+ V(RiscvSubOvf64) \
+ V(RiscvMul32) \
+ V(RiscvMulOvf32) \
+ V(RiscvMulHigh32) \
+ V(RiscvMulHigh64) \
+ V(RiscvMulHighU32) \
+ V(RiscvMul64) \
+ V(RiscvDiv32) \
+ V(RiscvDiv64) \
+ V(RiscvDivU32) \
+ V(RiscvDivU64) \
+ V(RiscvMod32) \
+ V(RiscvMod64) \
+ V(RiscvModU32) \
+ V(RiscvModU64) \
+ V(RiscvAnd) \
+ V(RiscvAnd32) \
+ V(RiscvOr) \
+ V(RiscvOr32) \
+ V(RiscvNor) \
+ V(RiscvNor32) \
+ V(RiscvXor) \
+ V(RiscvXor32) \
+ V(RiscvClz32) \
+ V(RiscvShl32) \
+ V(RiscvShr32) \
+ V(RiscvSar32) \
+ V(RiscvZeroExtendWord) \
+ V(RiscvSignExtendWord) \
+ V(RiscvClz64) \
+ V(RiscvCtz32) \
+ V(RiscvCtz64) \
+ V(RiscvPopcnt32) \
+ V(RiscvPopcnt64) \
+ V(RiscvShl64) \
+ V(RiscvShr64) \
+ V(RiscvSar64) \
+ V(RiscvRor32) \
+ V(RiscvRor64) \
+ V(RiscvMov) \
+ V(RiscvTst) \
+ V(RiscvCmp) \
+ V(RiscvCmpZero) \
+ V(RiscvCmpS) \
+ V(RiscvAddS) \
+ V(RiscvSubS) \
+ V(RiscvMulS) \
+ V(RiscvDivS) \
+ V(RiscvModS) \
+ V(RiscvAbsS) \
+ V(RiscvNegS) \
+ V(RiscvSqrtS) \
+ V(RiscvMaxS) \
+ V(RiscvMinS) \
+ V(RiscvCmpD) \
+ V(RiscvAddD) \
+ V(RiscvSubD) \
+ V(RiscvMulD) \
+ V(RiscvDivD) \
+ V(RiscvModD) \
+ V(RiscvAbsD) \
+ V(RiscvNegD) \
+ V(RiscvSqrtD) \
+ V(RiscvMaxD) \
+ V(RiscvMinD) \
+ V(RiscvFloat64RoundDown) \
+ V(RiscvFloat64RoundTruncate) \
+ V(RiscvFloat64RoundUp) \
+ V(RiscvFloat64RoundTiesEven) \
+ V(RiscvFloat32RoundDown) \
+ V(RiscvFloat32RoundTruncate) \
+ V(RiscvFloat32RoundUp) \
+ V(RiscvFloat32RoundTiesEven) \
+ V(RiscvCvtSD) \
+ V(RiscvCvtDS) \
+ V(RiscvTruncWD) \
+ V(RiscvRoundWD) \
+ V(RiscvFloorWD) \
+ V(RiscvCeilWD) \
+ V(RiscvTruncWS) \
+ V(RiscvRoundWS) \
+ V(RiscvFloorWS) \
+ V(RiscvCeilWS) \
+ V(RiscvTruncLS) \
+ V(RiscvTruncLD) \
+ V(RiscvTruncUwD) \
+ V(RiscvTruncUwS) \
+ V(RiscvTruncUlS) \
+ V(RiscvTruncUlD) \
+ V(RiscvCvtDW) \
+ V(RiscvCvtSL) \
+ V(RiscvCvtSW) \
+ V(RiscvCvtSUw) \
+ V(RiscvCvtSUl) \
+ V(RiscvCvtDL) \
+ V(RiscvCvtDUw) \
+ V(RiscvCvtDUl) \
+ V(RiscvLb) \
+ V(RiscvLbu) \
+ V(RiscvSb) \
+ V(RiscvLh) \
+ V(RiscvUlh) \
+ V(RiscvLhu) \
+ V(RiscvUlhu) \
+ V(RiscvSh) \
+ V(RiscvUsh) \
+ V(RiscvLd) \
+ V(RiscvUld) \
+ V(RiscvLw) \
+ V(RiscvUlw) \
+ V(RiscvLwu) \
+ V(RiscvUlwu) \
+ V(RiscvSw) \
+ V(RiscvUsw) \
+ V(RiscvSd) \
+ V(RiscvUsd) \
+ V(RiscvLoadFloat) \
+ V(RiscvULoadFloat) \
+ V(RiscvStoreFloat) \
+ V(RiscvUStoreFloat) \
+ V(RiscvLoadDouble) \
+ V(RiscvULoadDouble) \
+ V(RiscvStoreDouble) \
+ V(RiscvUStoreDouble) \
+ V(RiscvBitcastDL) \
+ V(RiscvBitcastLD) \
+ V(RiscvBitcastInt32ToFloat32) \
+ V(RiscvBitcastFloat32ToInt32) \
+ V(RiscvFloat64ExtractLowWord32) \
+ V(RiscvFloat64ExtractHighWord32) \
+ V(RiscvFloat64InsertLowWord32) \
+ V(RiscvFloat64InsertHighWord32) \
+ V(RiscvFloat32Max) \
+ V(RiscvFloat64Max) \
+ V(RiscvFloat32Min) \
+ V(RiscvFloat64Min) \
+ V(RiscvFloat64SilenceNaN) \
+ V(RiscvPush) \
+ V(RiscvPeek) \
+ V(RiscvByteSwap64) \
+ V(RiscvByteSwap32) \
+ V(RiscvStoreToStackSlot) \
+ V(RiscvStackClaim) \
+ V(RiscvSignExtendByte) \
+ V(RiscvSignExtendShort) \
+ V(RiscvSync) \
+ V(RiscvAssertEqual) \
+ V(RiscvS128Const) \
+ V(RiscvS128Zero) \
+ V(RiscvS128AllOnes) \
+ V(RiscvI32x4Splat) \
+ V(RiscvI32x4ExtractLane) \
+ V(RiscvI32x4ReplaceLane) \
+ V(RiscvI32x4Add) \
+ V(RiscvI32x4Sub) \
+ V(RiscvF64x2Abs) \
+ V(RiscvF64x2Neg) \
+ V(RiscvF32x4Splat) \
+ V(RiscvF32x4ExtractLane) \
+ V(RiscvF32x4ReplaceLane) \
+ V(RiscvF32x4SConvertI32x4) \
+ V(RiscvF32x4UConvertI32x4) \
+ V(RiscvI64x2SConvertI32x4Low) \
+ V(RiscvI64x2SConvertI32x4High) \
+ V(RiscvI64x2UConvertI32x4Low) \
+ V(RiscvI64x2UConvertI32x4High) \
+ V(RiscvI32x4Mul) \
+ V(RiscvI32x4MaxS) \
+ V(RiscvI32x4MinS) \
+ V(RiscvI32x4Eq) \
+ V(RiscvI32x4Ne) \
+ V(RiscvI32x4Shl) \
+ V(RiscvI32x4ShrS) \
+ V(RiscvI32x4ShrU) \
+ V(RiscvI32x4MaxU) \
+ V(RiscvI32x4MinU) \
+ V(RiscvI64x2GtS) \
+ V(RiscvI64x2GeS) \
+ V(RiscvI64x2Eq) \
+ V(RiscvI64x2Ne) \
+ V(RiscvF64x2Sqrt) \
+ V(RiscvF64x2Add) \
+ V(RiscvF64x2Sub) \
+ V(RiscvF64x2Mul) \
+ V(RiscvF64x2Div) \
+ V(RiscvF64x2Min) \
+ V(RiscvF64x2Max) \
+ V(RiscvF64x2ConvertLowI32x4S) \
+ V(RiscvF64x2ConvertLowI32x4U) \
+ V(RiscvF64x2PromoteLowF32x4) \
+ V(RiscvF64x2Eq) \
+ V(RiscvF64x2Ne) \
+ V(RiscvF64x2Lt) \
+ V(RiscvF64x2Le) \
+ V(RiscvF64x2Splat) \
+ V(RiscvF64x2ExtractLane) \
+ V(RiscvF64x2ReplaceLane) \
+ V(RiscvF64x2Pmin) \
+ V(RiscvF64x2Pmax) \
+ V(RiscvF64x2Ceil) \
+ V(RiscvF64x2Floor) \
+ V(RiscvF64x2Trunc) \
+ V(RiscvF64x2NearestInt) \
+ V(RiscvI64x2Splat) \
+ V(RiscvI64x2ExtractLane) \
+ V(RiscvI64x2ReplaceLane) \
+ V(RiscvI64x2Add) \
+ V(RiscvI64x2Sub) \
+ V(RiscvI64x2Mul) \
+ V(RiscvI64x2Abs) \
+ V(RiscvI64x2Neg) \
+ V(RiscvI64x2Shl) \
+ V(RiscvI64x2ShrS) \
+ V(RiscvI64x2ShrU) \
+ V(RiscvI64x2BitMask) \
+ V(RiscvF32x4Abs) \
+ V(RiscvF32x4Neg) \
+ V(RiscvF32x4Sqrt) \
+ V(RiscvF32x4RecipApprox) \
+ V(RiscvF32x4RecipSqrtApprox) \
+ V(RiscvF32x4Add) \
+ V(RiscvF32x4Sub) \
+ V(RiscvF32x4Mul) \
+ V(RiscvF32x4Div) \
+ V(RiscvF32x4Max) \
+ V(RiscvF32x4Min) \
+ V(RiscvF32x4Eq) \
+ V(RiscvF32x4Ne) \
+ V(RiscvF32x4Lt) \
+ V(RiscvF32x4Le) \
+ V(RiscvF32x4Pmin) \
+ V(RiscvF32x4Pmax) \
+ V(RiscvF32x4DemoteF64x2Zero) \
+ V(RiscvF32x4Ceil) \
+ V(RiscvF32x4Floor) \
+ V(RiscvF32x4Trunc) \
+ V(RiscvF32x4NearestInt) \
+ V(RiscvI32x4SConvertF32x4) \
+ V(RiscvI32x4UConvertF32x4) \
+ V(RiscvI32x4Neg) \
+ V(RiscvI32x4GtS) \
+ V(RiscvI32x4GeS) \
+ V(RiscvI32x4GtU) \
+ V(RiscvI32x4GeU) \
+ V(RiscvI32x4Abs) \
+ V(RiscvI32x4BitMask) \
+ V(RiscvI32x4DotI16x8S) \
+ V(RiscvI32x4TruncSatF64x2SZero) \
+ V(RiscvI32x4TruncSatF64x2UZero) \
+ V(RiscvI16x8Splat) \
+ V(RiscvI16x8ExtractLaneU) \
+ V(RiscvI16x8ExtractLaneS) \
+ V(RiscvI16x8ReplaceLane) \
+ V(RiscvI16x8Neg) \
+ V(RiscvI16x8Shl) \
+ V(RiscvI16x8ShrS) \
+ V(RiscvI16x8ShrU) \
+ V(RiscvI16x8Add) \
+ V(RiscvI16x8AddSatS) \
+ V(RiscvI16x8Sub) \
+ V(RiscvI16x8SubSatS) \
+ V(RiscvI16x8Mul) \
+ V(RiscvI16x8MaxS) \
+ V(RiscvI16x8MinS) \
+ V(RiscvI16x8Eq) \
+ V(RiscvI16x8Ne) \
+ V(RiscvI16x8GtS) \
+ V(RiscvI16x8GeS) \
+ V(RiscvI16x8AddSatU) \
+ V(RiscvI16x8SubSatU) \
+ V(RiscvI16x8MaxU) \
+ V(RiscvI16x8MinU) \
+ V(RiscvI16x8GtU) \
+ V(RiscvI16x8GeU) \
+ V(RiscvI16x8RoundingAverageU) \
+ V(RiscvI16x8Q15MulRSatS) \
+ V(RiscvI16x8Abs) \
+ V(RiscvI16x8BitMask) \
+ V(RiscvI8x16Splat) \
+ V(RiscvI8x16ExtractLaneU) \
+ V(RiscvI8x16ExtractLaneS) \
+ V(RiscvI8x16ReplaceLane) \
+ V(RiscvI8x16Neg) \
+ V(RiscvI8x16Shl) \
+ V(RiscvI8x16ShrS) \
+ V(RiscvI8x16Add) \
+ V(RiscvI8x16AddSatS) \
+ V(RiscvI8x16Sub) \
+ V(RiscvI8x16SubSatS) \
+ V(RiscvI8x16MaxS) \
+ V(RiscvI8x16MinS) \
+ V(RiscvI8x16Eq) \
+ V(RiscvI8x16Ne) \
+ V(RiscvI8x16GtS) \
+ V(RiscvI8x16GeS) \
+ V(RiscvI8x16ShrU) \
+ V(RiscvI8x16AddSatU) \
+ V(RiscvI8x16SubSatU) \
+ V(RiscvI8x16MaxU) \
+ V(RiscvI8x16MinU) \
+ V(RiscvI8x16GtU) \
+ V(RiscvI8x16GeU) \
+ V(RiscvI8x16RoundingAverageU) \
+ V(RiscvI8x16Abs) \
+ V(RiscvI8x16BitMask) \
+ V(RiscvI8x16Popcnt) \
+ V(RiscvS128And) \
+ V(RiscvS128Or) \
+ V(RiscvS128Xor) \
+ V(RiscvS128Not) \
+ V(RiscvS128Select) \
+ V(RiscvS128AndNot) \
+ V(RiscvI32x4AllTrue) \
+ V(RiscvI16x8AllTrue) \
+ V(RiscvV128AnyTrue) \
+ V(RiscvI8x16AllTrue) \
+ V(RiscvI64x2AllTrue) \
+ V(RiscvS32x4InterleaveRight) \
+ V(RiscvS32x4InterleaveLeft) \
+ V(RiscvS32x4PackEven) \
+ V(RiscvS32x4PackOdd) \
+ V(RiscvS32x4InterleaveEven) \
+ V(RiscvS32x4InterleaveOdd) \
+ V(RiscvS32x4Shuffle) \
+ V(RiscvS16x8InterleaveRight) \
+ V(RiscvS16x8InterleaveLeft) \
+ V(RiscvS16x8PackEven) \
+ V(RiscvS16x8PackOdd) \
+ V(RiscvS16x8InterleaveEven) \
+ V(RiscvS16x8InterleaveOdd) \
+ V(RiscvS16x4Reverse) \
+ V(RiscvS16x2Reverse) \
+ V(RiscvS8x16InterleaveRight) \
+ V(RiscvS8x16InterleaveLeft) \
+ V(RiscvS8x16PackEven) \
+ V(RiscvS8x16PackOdd) \
+ V(RiscvS8x16InterleaveEven) \
+ V(RiscvS8x16InterleaveOdd) \
+ V(RiscvI8x16Shuffle) \
+ V(RiscvI8x16Swizzle) \
+ V(RiscvS8x16Concat) \
+ V(RiscvS8x8Reverse) \
+ V(RiscvS8x4Reverse) \
+ V(RiscvS8x2Reverse) \
+ V(RiscvS128Load8Splat) \
+ V(RiscvS128Load16Splat) \
+ V(RiscvS128Load32Splat) \
+ V(RiscvS128Load64Splat) \
+ V(RiscvS128Load8x8S) \
+ V(RiscvS128Load8x8U) \
+ V(RiscvS128Load16x4S) \
+ V(RiscvS128Load16x4U) \
+ V(RiscvS128Load32x2S) \
+ V(RiscvS128Load32x2U) \
+ V(RiscvS128LoadLane) \
+ V(RiscvS128StoreLane) \
+ V(RiscvRvvLd) \
+ V(RiscvRvvSt) \
+ V(RiscvI32x4SConvertI16x8Low) \
+ V(RiscvI32x4SConvertI16x8High) \
+ V(RiscvI32x4UConvertI16x8Low) \
+ V(RiscvI32x4UConvertI16x8High) \
+ V(RiscvI16x8SConvertI8x16Low) \
+ V(RiscvI16x8SConvertI8x16High) \
+ V(RiscvI16x8SConvertI32x4) \
+ V(RiscvI16x8UConvertI32x4) \
+ V(RiscvI16x8UConvertI8x16Low) \
+ V(RiscvI16x8UConvertI8x16High) \
+ V(RiscvI8x16SConvertI16x8) \
+ V(RiscvI8x16UConvertI16x8) \
+ V(RiscvWord64AtomicLoadUint64) \
+ V(RiscvWord64AtomicStoreWord64) \
+ V(RiscvWord64AtomicAddUint64) \
+ V(RiscvWord64AtomicSubUint64) \
+ V(RiscvWord64AtomicAndUint64) \
+ V(RiscvWord64AtomicOrUint64) \
+ V(RiscvWord64AtomicXorUint64) \
+ V(RiscvWord64AtomicExchangeUint64) \
+ V(RiscvWord64AtomicCompareExchangeUint64) \
+ V(RiscvStoreCompressTagged) \
+ V(RiscvLoadDecompressTaggedSigned) \
+ V(RiscvLoadDecompressTaggedPointer) \
V(RiscvLoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
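The riscv64 header above (and the s390 and x64 headers later in this diff) now define a TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST and splice it into TARGET_ARCH_OPCODE_LIST. As a rough illustration of the X-macro pattern these headers rely on, a sub-list like this lets the same entries feed both the opcode enum and a "supports a MemoryAccessMode" predicate; all names in the sketch are invented for the example and are not V8's.

#include <cstdio>

// Hypothetical stand-ins for the generated lists.
#define OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(DemoLoad)                                  \
  V(DemoStore)

#define OPCODE_LIST(V)                   \
  OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(DemoAdd)                             \
  V(DemoSub)

enum DemoOpcode {
#define DECLARE(Name) k##Name,
  OPCODE_LIST(DECLARE)
#undef DECLARE
};

bool SupportsMemoryAccessMode(DemoOpcode op) {
  switch (op) {
#define CASE(Name) case k##Name:
    OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
#undef CASE
    return true;
    default:
      return false;
  }
}

int main() {
  // prints "1 0": only the opcodes in the sub-list report support.
  std::printf("%d %d\n", SupportsMemoryAccessMode(kDemoLoad),
              SupportsMemoryAccessMode(kDemoAdd));
}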
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 471628b1f8..54d9a98663 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -1117,7 +1117,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchJumpLatency();
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 85d61aa02f..6fc64256ec 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -363,9 +363,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
RiscvOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -454,7 +454,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kRiscvLwu : kRiscvLw;
+ opcode = kRiscvLw;
break;
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
@@ -1287,7 +1287,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
return true;
default:
return false;
@@ -1623,7 +1622,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kRiscvUlwu : kRiscvUlw;
+ opcode = kRiscvUlw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
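In the riscv64 selector hunks above, word32 loads now always use kRiscvLw, and kWord32 is dropped from ZeroExtendsWord32ToWord64NoPhis; on RV64 the lw instruction sign-extends the loaded word into the 64-bit destination, whereas lwu zero-extends. A small host-side sketch of that distinction (plain C++, not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// lw-style: load 32 bits, sign-extend to 64.
uint64_t LoadWordSigned(const void* p) {
  int32_t v;
  std::memcpy(&v, p, sizeof(v));
  return static_cast<uint64_t>(static_cast<int64_t>(v));
}

// lwu-style: load 32 bits, zero-extend to 64.
uint64_t LoadWordUnsigned(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return static_cast<uint64_t>(v);
}

int main() {
  uint32_t mem = 0x80000000u;  // high bit set
  std::printf("lw  -> %016llx\n",
              static_cast<unsigned long long>(LoadWordSigned(&mem)));   // ffffffff80000000
  std::printf("lwu -> %016llx\n",
              static_cast<unsigned long long>(LoadWordUnsigned(&mem))); // 0000000080000000
}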
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 3c2c3d6c06..e58a0ed576 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1174,8 +1174,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ kScratchReg);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -1211,7 +1213,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
// Put the return address in a stack slot.
#if V8_ENABLE_WEBASSEMBLY
@@ -1224,10 +1227,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters);
}
__ bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
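Several backends in this diff replace the single MiscField parameter count on kArchCallCFunction with separate ParamField (general-purpose) and FPParamField (floating-point) counts decoded from the opcode. The real bit layout lives in V8's instruction-codes.h and is not shown here; the sketch below only illustrates packing two small counts into one opcode word, with field widths invented for the example.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical layout: bits [0,6) hold the GP count, bits [6,12) the FP count.
constexpr uint32_t kParamShift = 0, kParamBits = 6;
constexpr uint32_t kFPParamShift = 6, kFPParamBits = 6;

constexpr uint32_t EncodeParams(uint32_t gp, uint32_t fp) {
  return (gp << kParamShift) | (fp << kFPParamShift);
}
constexpr uint32_t DecodeGP(uint32_t misc) {
  return (misc >> kParamShift) & ((1u << kParamBits) - 1);
}
constexpr uint32_t DecodeFP(uint32_t misc) {
  return (misc >> kFPParamShift) & ((1u << kFPParamBits) - 1);
}

int main() {
  uint32_t misc = EncodeParams(3, 2);
  assert(DecodeGP(misc) == 3 && DecodeFP(misc) == 2);
  // The code generator then forwards both counts to CallCFunction,
  // or just their sum where the target only needs a total (as on x64).
  std::printf("gp=%u fp=%u total=%u\n", DecodeGP(misc), DecodeFP(misc),
              DecodeGP(misc) + DecodeFP(misc));
}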
@@ -1263,13 +1266,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r3);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 03806b57b1..7dcd7212c9 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -11,392 +11,397 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(S390_Peek) \
- V(S390_Abs32) \
- V(S390_Abs64) \
- V(S390_And32) \
- V(S390_And64) \
- V(S390_Or32) \
- V(S390_Or64) \
- V(S390_Xor32) \
- V(S390_Xor64) \
- V(S390_ShiftLeft32) \
- V(S390_ShiftLeft64) \
- V(S390_ShiftRight32) \
- V(S390_ShiftRight64) \
- V(S390_ShiftRightArith32) \
- V(S390_ShiftRightArith64) \
- V(S390_RotRight32) \
- V(S390_RotRight64) \
- V(S390_Not32) \
- V(S390_Not64) \
- V(S390_RotLeftAndClear64) \
- V(S390_RotLeftAndClearLeft64) \
- V(S390_RotLeftAndClearRight64) \
- V(S390_Lay) \
- V(S390_Add32) \
- V(S390_Add64) \
- V(S390_AddFloat) \
- V(S390_AddDouble) \
- V(S390_Sub32) \
- V(S390_Sub64) \
- V(S390_SubFloat) \
- V(S390_SubDouble) \
- V(S390_Mul32) \
- V(S390_Mul32WithOverflow) \
- V(S390_Mul64) \
- V(S390_MulHigh32) \
- V(S390_MulHighU32) \
- V(S390_MulFloat) \
- V(S390_MulDouble) \
- V(S390_Div32) \
- V(S390_Div64) \
- V(S390_DivU32) \
- V(S390_DivU64) \
- V(S390_DivFloat) \
- V(S390_DivDouble) \
- V(S390_Mod32) \
- V(S390_Mod64) \
- V(S390_ModU32) \
- V(S390_ModU64) \
- V(S390_ModDouble) \
- V(S390_Neg32) \
- V(S390_Neg64) \
- V(S390_NegDouble) \
- V(S390_NegFloat) \
- V(S390_SqrtFloat) \
- V(S390_FloorFloat) \
- V(S390_CeilFloat) \
- V(S390_TruncateFloat) \
- V(S390_FloatNearestInt) \
- V(S390_AbsFloat) \
- V(S390_SqrtDouble) \
- V(S390_FloorDouble) \
- V(S390_CeilDouble) \
- V(S390_TruncateDouble) \
- V(S390_RoundDouble) \
- V(S390_DoubleNearestInt) \
- V(S390_MaxFloat) \
- V(S390_MaxDouble) \
- V(S390_MinFloat) \
- V(S390_MinDouble) \
- V(S390_AbsDouble) \
- V(S390_Cntlz32) \
- V(S390_Cntlz64) \
- V(S390_Popcnt32) \
- V(S390_Popcnt64) \
- V(S390_Cmp32) \
- V(S390_Cmp64) \
- V(S390_CmpFloat) \
- V(S390_CmpDouble) \
- V(S390_Tst32) \
- V(S390_Tst64) \
- V(S390_Push) \
- V(S390_PushFrame) \
- V(S390_StoreToStackSlot) \
- V(S390_SignExtendWord8ToInt32) \
- V(S390_SignExtendWord16ToInt32) \
- V(S390_SignExtendWord8ToInt64) \
- V(S390_SignExtendWord16ToInt64) \
- V(S390_SignExtendWord32ToInt64) \
- V(S390_Uint32ToUint64) \
- V(S390_Int64ToInt32) \
- V(S390_Int64ToFloat32) \
- V(S390_Int64ToDouble) \
- V(S390_Uint64ToFloat32) \
- V(S390_Uint64ToDouble) \
- V(S390_Int32ToFloat32) \
- V(S390_Int32ToDouble) \
- V(S390_Uint32ToFloat32) \
- V(S390_Uint32ToDouble) \
- V(S390_Float32ToInt64) \
- V(S390_Float32ToUint64) \
- V(S390_Float32ToInt32) \
- V(S390_Float32ToUint32) \
- V(S390_Float32ToDouble) \
- V(S390_Float64SilenceNaN) \
- V(S390_DoubleToInt32) \
- V(S390_DoubleToUint32) \
- V(S390_DoubleToInt64) \
- V(S390_DoubleToUint64) \
- V(S390_DoubleToFloat32) \
- V(S390_DoubleExtractLowWord32) \
- V(S390_DoubleExtractHighWord32) \
- V(S390_DoubleInsertLowWord32) \
- V(S390_DoubleInsertHighWord32) \
- V(S390_DoubleConstruct) \
- V(S390_BitcastInt32ToFloat32) \
- V(S390_BitcastFloat32ToInt32) \
- V(S390_BitcastInt64ToDouble) \
- V(S390_BitcastDoubleToInt64) \
- V(S390_LoadWordS8) \
- V(S390_LoadWordU8) \
- V(S390_LoadWordS16) \
- V(S390_LoadWordU16) \
- V(S390_LoadWordS32) \
- V(S390_LoadWordU32) \
- V(S390_LoadAndTestWord32) \
- V(S390_LoadAndTestWord64) \
- V(S390_LoadAndTestFloat32) \
- V(S390_LoadAndTestFloat64) \
- V(S390_LoadReverse16RR) \
- V(S390_LoadReverse32RR) \
- V(S390_LoadReverse64RR) \
- V(S390_LoadReverseSimd128RR) \
- V(S390_LoadReverseSimd128) \
- V(S390_LoadReverse16) \
- V(S390_LoadReverse32) \
- V(S390_LoadReverse64) \
- V(S390_LoadWord64) \
- V(S390_LoadFloat32) \
- V(S390_LoadDouble) \
- V(S390_StoreWord8) \
- V(S390_StoreWord16) \
- V(S390_StoreWord32) \
- V(S390_StoreWord64) \
- V(S390_StoreReverse16) \
- V(S390_StoreReverse32) \
- V(S390_StoreReverse64) \
- V(S390_StoreReverseSimd128) \
- V(S390_StoreFloat32) \
- V(S390_StoreDouble) \
- V(S390_Word64AtomicExchangeUint64) \
- V(S390_Word64AtomicCompareExchangeUint64) \
- V(S390_Word64AtomicAddUint64) \
- V(S390_Word64AtomicSubUint64) \
- V(S390_Word64AtomicAndUint64) \
- V(S390_Word64AtomicOrUint64) \
- V(S390_Word64AtomicXorUint64) \
- V(S390_F64x2Splat) \
- V(S390_F64x2ReplaceLane) \
- V(S390_F64x2Abs) \
- V(S390_F64x2Neg) \
- V(S390_F64x2Sqrt) \
- V(S390_F64x2Add) \
- V(S390_F64x2Sub) \
- V(S390_F64x2Mul) \
- V(S390_F64x2Div) \
- V(S390_F64x2Eq) \
- V(S390_F64x2Ne) \
- V(S390_F64x2Lt) \
- V(S390_F64x2Le) \
- V(S390_F64x2Min) \
- V(S390_F64x2Max) \
- V(S390_F64x2ExtractLane) \
- V(S390_F64x2Qfma) \
- V(S390_F64x2Qfms) \
- V(S390_F64x2Pmin) \
- V(S390_F64x2Pmax) \
- V(S390_F64x2Ceil) \
- V(S390_F64x2Floor) \
- V(S390_F64x2Trunc) \
- V(S390_F64x2NearestInt) \
- V(S390_F64x2ConvertLowI32x4S) \
- V(S390_F64x2ConvertLowI32x4U) \
- V(S390_F64x2PromoteLowF32x4) \
- V(S390_F32x4Splat) \
- V(S390_F32x4ExtractLane) \
- V(S390_F32x4ReplaceLane) \
- V(S390_F32x4Add) \
- V(S390_F32x4Sub) \
- V(S390_F32x4Mul) \
- V(S390_F32x4Eq) \
- V(S390_F32x4Ne) \
- V(S390_F32x4Lt) \
- V(S390_F32x4Le) \
- V(S390_F32x4Abs) \
- V(S390_F32x4Neg) \
- V(S390_F32x4RecipApprox) \
- V(S390_F32x4RecipSqrtApprox) \
- V(S390_F32x4SConvertI32x4) \
- V(S390_F32x4UConvertI32x4) \
- V(S390_F32x4Sqrt) \
- V(S390_F32x4Div) \
- V(S390_F32x4Min) \
- V(S390_F32x4Max) \
- V(S390_F32x4Qfma) \
- V(S390_F32x4Qfms) \
- V(S390_F32x4Pmin) \
- V(S390_F32x4Pmax) \
- V(S390_F32x4Ceil) \
- V(S390_F32x4Floor) \
- V(S390_F32x4Trunc) \
- V(S390_F32x4NearestInt) \
- V(S390_F32x4DemoteF64x2Zero) \
- V(S390_I64x2Neg) \
- V(S390_I64x2Add) \
- V(S390_I64x2Sub) \
- V(S390_I64x2Shl) \
- V(S390_I64x2ShrS) \
- V(S390_I64x2ShrU) \
- V(S390_I64x2Mul) \
- V(S390_I64x2Splat) \
- V(S390_I64x2ReplaceLane) \
- V(S390_I64x2ExtractLane) \
- V(S390_I64x2Eq) \
- V(S390_I64x2BitMask) \
- V(S390_I64x2ExtMulLowI32x4S) \
- V(S390_I64x2ExtMulHighI32x4S) \
- V(S390_I64x2ExtMulLowI32x4U) \
- V(S390_I64x2ExtMulHighI32x4U) \
- V(S390_I64x2SConvertI32x4Low) \
- V(S390_I64x2SConvertI32x4High) \
- V(S390_I64x2UConvertI32x4Low) \
- V(S390_I64x2UConvertI32x4High) \
- V(S390_I64x2Ne) \
- V(S390_I64x2GtS) \
- V(S390_I64x2GeS) \
- V(S390_I64x2Abs) \
- V(S390_I32x4Splat) \
- V(S390_I32x4ExtractLane) \
- V(S390_I32x4ReplaceLane) \
- V(S390_I32x4Add) \
- V(S390_I32x4Sub) \
- V(S390_I32x4Mul) \
- V(S390_I32x4MinS) \
- V(S390_I32x4MinU) \
- V(S390_I32x4MaxS) \
- V(S390_I32x4MaxU) \
- V(S390_I32x4Eq) \
- V(S390_I32x4Ne) \
- V(S390_I32x4GtS) \
- V(S390_I32x4GeS) \
- V(S390_I32x4GtU) \
- V(S390_I32x4GeU) \
- V(S390_I32x4Neg) \
- V(S390_I32x4Shl) \
- V(S390_I32x4ShrS) \
- V(S390_I32x4ShrU) \
- V(S390_I32x4SConvertF32x4) \
- V(S390_I32x4UConvertF32x4) \
- V(S390_I32x4SConvertI16x8Low) \
- V(S390_I32x4SConvertI16x8High) \
- V(S390_I32x4UConvertI16x8Low) \
- V(S390_I32x4UConvertI16x8High) \
- V(S390_I32x4Abs) \
- V(S390_I32x4BitMask) \
- V(S390_I32x4DotI16x8S) \
- V(S390_I32x4ExtMulLowI16x8S) \
- V(S390_I32x4ExtMulHighI16x8S) \
- V(S390_I32x4ExtMulLowI16x8U) \
- V(S390_I32x4ExtMulHighI16x8U) \
- V(S390_I32x4ExtAddPairwiseI16x8S) \
- V(S390_I32x4ExtAddPairwiseI16x8U) \
- V(S390_I32x4TruncSatF64x2SZero) \
- V(S390_I32x4TruncSatF64x2UZero) \
- V(S390_I16x8Splat) \
- V(S390_I16x8ExtractLaneU) \
- V(S390_I16x8ExtractLaneS) \
- V(S390_I16x8ReplaceLane) \
- V(S390_I16x8Add) \
- V(S390_I16x8Sub) \
- V(S390_I16x8Mul) \
- V(S390_I16x8MinS) \
- V(S390_I16x8MinU) \
- V(S390_I16x8MaxS) \
- V(S390_I16x8MaxU) \
- V(S390_I16x8Eq) \
- V(S390_I16x8Ne) \
- V(S390_I16x8GtS) \
- V(S390_I16x8GeS) \
- V(S390_I16x8GtU) \
- V(S390_I16x8GeU) \
- V(S390_I16x8Shl) \
- V(S390_I16x8ShrS) \
- V(S390_I16x8ShrU) \
- V(S390_I16x8Neg) \
- V(S390_I16x8SConvertI32x4) \
- V(S390_I16x8UConvertI32x4) \
- V(S390_I16x8SConvertI8x16Low) \
- V(S390_I16x8SConvertI8x16High) \
- V(S390_I16x8UConvertI8x16Low) \
- V(S390_I16x8UConvertI8x16High) \
- V(S390_I16x8AddSatS) \
- V(S390_I16x8SubSatS) \
- V(S390_I16x8AddSatU) \
- V(S390_I16x8SubSatU) \
- V(S390_I16x8RoundingAverageU) \
- V(S390_I16x8Abs) \
- V(S390_I16x8BitMask) \
- V(S390_I16x8ExtMulLowI8x16S) \
- V(S390_I16x8ExtMulHighI8x16S) \
- V(S390_I16x8ExtMulLowI8x16U) \
- V(S390_I16x8ExtMulHighI8x16U) \
- V(S390_I16x8ExtAddPairwiseI8x16S) \
- V(S390_I16x8ExtAddPairwiseI8x16U) \
- V(S390_I16x8Q15MulRSatS) \
- V(S390_I8x16Splat) \
- V(S390_I8x16ExtractLaneU) \
- V(S390_I8x16ExtractLaneS) \
- V(S390_I8x16ReplaceLane) \
- V(S390_I8x16Add) \
- V(S390_I8x16Sub) \
- V(S390_I8x16MinS) \
- V(S390_I8x16MinU) \
- V(S390_I8x16MaxS) \
- V(S390_I8x16MaxU) \
- V(S390_I8x16Eq) \
- V(S390_I8x16Ne) \
- V(S390_I8x16GtS) \
- V(S390_I8x16GeS) \
- V(S390_I8x16GtU) \
- V(S390_I8x16GeU) \
- V(S390_I8x16Shl) \
- V(S390_I8x16ShrS) \
- V(S390_I8x16ShrU) \
- V(S390_I8x16Neg) \
- V(S390_I8x16SConvertI16x8) \
- V(S390_I8x16UConvertI16x8) \
- V(S390_I8x16AddSatS) \
- V(S390_I8x16SubSatS) \
- V(S390_I8x16AddSatU) \
- V(S390_I8x16SubSatU) \
- V(S390_I8x16RoundingAverageU) \
- V(S390_I8x16Abs) \
- V(S390_I8x16BitMask) \
- V(S390_I8x16Shuffle) \
- V(S390_I8x16Swizzle) \
- V(S390_I8x16Popcnt) \
- V(S390_I64x2AllTrue) \
- V(S390_I32x4AllTrue) \
- V(S390_I16x8AllTrue) \
- V(S390_I8x16AllTrue) \
- V(S390_V128AnyTrue) \
- V(S390_S128And) \
- V(S390_S128Or) \
- V(S390_S128Xor) \
- V(S390_S128Const) \
- V(S390_S128Zero) \
- V(S390_S128AllOnes) \
- V(S390_S128Not) \
- V(S390_S128Select) \
- V(S390_S128AndNot) \
- V(S390_S128Load8Splat) \
- V(S390_S128Load16Splat) \
- V(S390_S128Load32Splat) \
- V(S390_S128Load64Splat) \
- V(S390_S128Load8x8S) \
- V(S390_S128Load8x8U) \
- V(S390_S128Load16x4S) \
- V(S390_S128Load16x4U) \
- V(S390_S128Load32x2S) \
- V(S390_S128Load32x2U) \
- V(S390_S128Load32Zero) \
- V(S390_S128Load64Zero) \
- V(S390_S128Load8Lane) \
- V(S390_S128Load16Lane) \
- V(S390_S128Load32Lane) \
- V(S390_S128Load64Lane) \
- V(S390_S128Store8Lane) \
- V(S390_S128Store16Lane) \
- V(S390_S128Store32Lane) \
- V(S390_S128Store64Lane) \
- V(S390_StoreSimd128) \
- V(S390_LoadSimd128) \
- V(S390_StoreCompressTagged) \
- V(S390_LoadDecompressTaggedSigned) \
- V(S390_LoadDecompressTaggedPointer) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(S390_Peek) \
+ V(S390_Abs32) \
+ V(S390_Abs64) \
+ V(S390_And32) \
+ V(S390_And64) \
+ V(S390_Or32) \
+ V(S390_Or64) \
+ V(S390_Xor32) \
+ V(S390_Xor64) \
+ V(S390_ShiftLeft32) \
+ V(S390_ShiftLeft64) \
+ V(S390_ShiftRight32) \
+ V(S390_ShiftRight64) \
+ V(S390_ShiftRightArith32) \
+ V(S390_ShiftRightArith64) \
+ V(S390_RotRight32) \
+ V(S390_RotRight64) \
+ V(S390_Not32) \
+ V(S390_Not64) \
+ V(S390_RotLeftAndClear64) \
+ V(S390_RotLeftAndClearLeft64) \
+ V(S390_RotLeftAndClearRight64) \
+ V(S390_Lay) \
+ V(S390_Add32) \
+ V(S390_Add64) \
+ V(S390_AddFloat) \
+ V(S390_AddDouble) \
+ V(S390_Sub32) \
+ V(S390_Sub64) \
+ V(S390_SubFloat) \
+ V(S390_SubDouble) \
+ V(S390_Mul32) \
+ V(S390_Mul32WithOverflow) \
+ V(S390_Mul64) \
+ V(S390_MulHigh32) \
+ V(S390_MulHighU32) \
+ V(S390_MulFloat) \
+ V(S390_MulDouble) \
+ V(S390_Div32) \
+ V(S390_Div64) \
+ V(S390_DivU32) \
+ V(S390_DivU64) \
+ V(S390_DivFloat) \
+ V(S390_DivDouble) \
+ V(S390_Mod32) \
+ V(S390_Mod64) \
+ V(S390_ModU32) \
+ V(S390_ModU64) \
+ V(S390_ModDouble) \
+ V(S390_Neg32) \
+ V(S390_Neg64) \
+ V(S390_NegDouble) \
+ V(S390_NegFloat) \
+ V(S390_SqrtFloat) \
+ V(S390_FloorFloat) \
+ V(S390_CeilFloat) \
+ V(S390_TruncateFloat) \
+ V(S390_FloatNearestInt) \
+ V(S390_AbsFloat) \
+ V(S390_SqrtDouble) \
+ V(S390_FloorDouble) \
+ V(S390_CeilDouble) \
+ V(S390_TruncateDouble) \
+ V(S390_RoundDouble) \
+ V(S390_DoubleNearestInt) \
+ V(S390_MaxFloat) \
+ V(S390_MaxDouble) \
+ V(S390_MinFloat) \
+ V(S390_MinDouble) \
+ V(S390_AbsDouble) \
+ V(S390_Cntlz32) \
+ V(S390_Cntlz64) \
+ V(S390_Popcnt32) \
+ V(S390_Popcnt64) \
+ V(S390_Cmp32) \
+ V(S390_Cmp64) \
+ V(S390_CmpFloat) \
+ V(S390_CmpDouble) \
+ V(S390_Tst32) \
+ V(S390_Tst64) \
+ V(S390_Push) \
+ V(S390_PushFrame) \
+ V(S390_StoreToStackSlot) \
+ V(S390_SignExtendWord8ToInt32) \
+ V(S390_SignExtendWord16ToInt32) \
+ V(S390_SignExtendWord8ToInt64) \
+ V(S390_SignExtendWord16ToInt64) \
+ V(S390_SignExtendWord32ToInt64) \
+ V(S390_Uint32ToUint64) \
+ V(S390_Int64ToInt32) \
+ V(S390_Int64ToFloat32) \
+ V(S390_Int64ToDouble) \
+ V(S390_Uint64ToFloat32) \
+ V(S390_Uint64ToDouble) \
+ V(S390_Int32ToFloat32) \
+ V(S390_Int32ToDouble) \
+ V(S390_Uint32ToFloat32) \
+ V(S390_Uint32ToDouble) \
+ V(S390_Float32ToInt64) \
+ V(S390_Float32ToUint64) \
+ V(S390_Float32ToInt32) \
+ V(S390_Float32ToUint32) \
+ V(S390_Float32ToDouble) \
+ V(S390_Float64SilenceNaN) \
+ V(S390_DoubleToInt32) \
+ V(S390_DoubleToUint32) \
+ V(S390_DoubleToInt64) \
+ V(S390_DoubleToUint64) \
+ V(S390_DoubleToFloat32) \
+ V(S390_DoubleExtractLowWord32) \
+ V(S390_DoubleExtractHighWord32) \
+ V(S390_DoubleInsertLowWord32) \
+ V(S390_DoubleInsertHighWord32) \
+ V(S390_DoubleConstruct) \
+ V(S390_BitcastInt32ToFloat32) \
+ V(S390_BitcastFloat32ToInt32) \
+ V(S390_BitcastInt64ToDouble) \
+ V(S390_BitcastDoubleToInt64) \
+ V(S390_LoadWordS8) \
+ V(S390_LoadWordU8) \
+ V(S390_LoadWordS16) \
+ V(S390_LoadWordU16) \
+ V(S390_LoadWordS32) \
+ V(S390_LoadWordU32) \
+ V(S390_LoadAndTestWord32) \
+ V(S390_LoadAndTestWord64) \
+ V(S390_LoadAndTestFloat32) \
+ V(S390_LoadAndTestFloat64) \
+ V(S390_LoadReverse16RR) \
+ V(S390_LoadReverse32RR) \
+ V(S390_LoadReverse64RR) \
+ V(S390_LoadReverseSimd128RR) \
+ V(S390_LoadReverseSimd128) \
+ V(S390_LoadReverse16) \
+ V(S390_LoadReverse32) \
+ V(S390_LoadReverse64) \
+ V(S390_LoadWord64) \
+ V(S390_LoadFloat32) \
+ V(S390_LoadDouble) \
+ V(S390_StoreWord8) \
+ V(S390_StoreWord16) \
+ V(S390_StoreWord32) \
+ V(S390_StoreWord64) \
+ V(S390_StoreReverse16) \
+ V(S390_StoreReverse32) \
+ V(S390_StoreReverse64) \
+ V(S390_StoreReverseSimd128) \
+ V(S390_StoreFloat32) \
+ V(S390_StoreDouble) \
+ V(S390_Word64AtomicExchangeUint64) \
+ V(S390_Word64AtomicCompareExchangeUint64) \
+ V(S390_Word64AtomicAddUint64) \
+ V(S390_Word64AtomicSubUint64) \
+ V(S390_Word64AtomicAndUint64) \
+ V(S390_Word64AtomicOrUint64) \
+ V(S390_Word64AtomicXorUint64) \
+ V(S390_F64x2Splat) \
+ V(S390_F64x2ReplaceLane) \
+ V(S390_F64x2Abs) \
+ V(S390_F64x2Neg) \
+ V(S390_F64x2Sqrt) \
+ V(S390_F64x2Add) \
+ V(S390_F64x2Sub) \
+ V(S390_F64x2Mul) \
+ V(S390_F64x2Div) \
+ V(S390_F64x2Eq) \
+ V(S390_F64x2Ne) \
+ V(S390_F64x2Lt) \
+ V(S390_F64x2Le) \
+ V(S390_F64x2Min) \
+ V(S390_F64x2Max) \
+ V(S390_F64x2ExtractLane) \
+ V(S390_F64x2Qfma) \
+ V(S390_F64x2Qfms) \
+ V(S390_F64x2Pmin) \
+ V(S390_F64x2Pmax) \
+ V(S390_F64x2Ceil) \
+ V(S390_F64x2Floor) \
+ V(S390_F64x2Trunc) \
+ V(S390_F64x2NearestInt) \
+ V(S390_F64x2ConvertLowI32x4S) \
+ V(S390_F64x2ConvertLowI32x4U) \
+ V(S390_F64x2PromoteLowF32x4) \
+ V(S390_F32x4Splat) \
+ V(S390_F32x4ExtractLane) \
+ V(S390_F32x4ReplaceLane) \
+ V(S390_F32x4Add) \
+ V(S390_F32x4Sub) \
+ V(S390_F32x4Mul) \
+ V(S390_F32x4Eq) \
+ V(S390_F32x4Ne) \
+ V(S390_F32x4Lt) \
+ V(S390_F32x4Le) \
+ V(S390_F32x4Abs) \
+ V(S390_F32x4Neg) \
+ V(S390_F32x4RecipApprox) \
+ V(S390_F32x4RecipSqrtApprox) \
+ V(S390_F32x4SConvertI32x4) \
+ V(S390_F32x4UConvertI32x4) \
+ V(S390_F32x4Sqrt) \
+ V(S390_F32x4Div) \
+ V(S390_F32x4Min) \
+ V(S390_F32x4Max) \
+ V(S390_F32x4Qfma) \
+ V(S390_F32x4Qfms) \
+ V(S390_F32x4Pmin) \
+ V(S390_F32x4Pmax) \
+ V(S390_F32x4Ceil) \
+ V(S390_F32x4Floor) \
+ V(S390_F32x4Trunc) \
+ V(S390_F32x4NearestInt) \
+ V(S390_F32x4DemoteF64x2Zero) \
+ V(S390_I64x2Neg) \
+ V(S390_I64x2Add) \
+ V(S390_I64x2Sub) \
+ V(S390_I64x2Shl) \
+ V(S390_I64x2ShrS) \
+ V(S390_I64x2ShrU) \
+ V(S390_I64x2Mul) \
+ V(S390_I64x2Splat) \
+ V(S390_I64x2ReplaceLane) \
+ V(S390_I64x2ExtractLane) \
+ V(S390_I64x2Eq) \
+ V(S390_I64x2BitMask) \
+ V(S390_I64x2ExtMulLowI32x4S) \
+ V(S390_I64x2ExtMulHighI32x4S) \
+ V(S390_I64x2ExtMulLowI32x4U) \
+ V(S390_I64x2ExtMulHighI32x4U) \
+ V(S390_I64x2SConvertI32x4Low) \
+ V(S390_I64x2SConvertI32x4High) \
+ V(S390_I64x2UConvertI32x4Low) \
+ V(S390_I64x2UConvertI32x4High) \
+ V(S390_I64x2Ne) \
+ V(S390_I64x2GtS) \
+ V(S390_I64x2GeS) \
+ V(S390_I64x2Abs) \
+ V(S390_I32x4Splat) \
+ V(S390_I32x4ExtractLane) \
+ V(S390_I32x4ReplaceLane) \
+ V(S390_I32x4Add) \
+ V(S390_I32x4Sub) \
+ V(S390_I32x4Mul) \
+ V(S390_I32x4MinS) \
+ V(S390_I32x4MinU) \
+ V(S390_I32x4MaxS) \
+ V(S390_I32x4MaxU) \
+ V(S390_I32x4Eq) \
+ V(S390_I32x4Ne) \
+ V(S390_I32x4GtS) \
+ V(S390_I32x4GeS) \
+ V(S390_I32x4GtU) \
+ V(S390_I32x4GeU) \
+ V(S390_I32x4Neg) \
+ V(S390_I32x4Shl) \
+ V(S390_I32x4ShrS) \
+ V(S390_I32x4ShrU) \
+ V(S390_I32x4SConvertF32x4) \
+ V(S390_I32x4UConvertF32x4) \
+ V(S390_I32x4SConvertI16x8Low) \
+ V(S390_I32x4SConvertI16x8High) \
+ V(S390_I32x4UConvertI16x8Low) \
+ V(S390_I32x4UConvertI16x8High) \
+ V(S390_I32x4Abs) \
+ V(S390_I32x4BitMask) \
+ V(S390_I32x4DotI16x8S) \
+ V(S390_I32x4ExtMulLowI16x8S) \
+ V(S390_I32x4ExtMulHighI16x8S) \
+ V(S390_I32x4ExtMulLowI16x8U) \
+ V(S390_I32x4ExtMulHighI16x8U) \
+ V(S390_I32x4ExtAddPairwiseI16x8S) \
+ V(S390_I32x4ExtAddPairwiseI16x8U) \
+ V(S390_I32x4TruncSatF64x2SZero) \
+ V(S390_I32x4TruncSatF64x2UZero) \
+ V(S390_I16x8Splat) \
+ V(S390_I16x8ExtractLaneU) \
+ V(S390_I16x8ExtractLaneS) \
+ V(S390_I16x8ReplaceLane) \
+ V(S390_I16x8Add) \
+ V(S390_I16x8Sub) \
+ V(S390_I16x8Mul) \
+ V(S390_I16x8MinS) \
+ V(S390_I16x8MinU) \
+ V(S390_I16x8MaxS) \
+ V(S390_I16x8MaxU) \
+ V(S390_I16x8Eq) \
+ V(S390_I16x8Ne) \
+ V(S390_I16x8GtS) \
+ V(S390_I16x8GeS) \
+ V(S390_I16x8GtU) \
+ V(S390_I16x8GeU) \
+ V(S390_I16x8Shl) \
+ V(S390_I16x8ShrS) \
+ V(S390_I16x8ShrU) \
+ V(S390_I16x8Neg) \
+ V(S390_I16x8SConvertI32x4) \
+ V(S390_I16x8UConvertI32x4) \
+ V(S390_I16x8SConvertI8x16Low) \
+ V(S390_I16x8SConvertI8x16High) \
+ V(S390_I16x8UConvertI8x16Low) \
+ V(S390_I16x8UConvertI8x16High) \
+ V(S390_I16x8AddSatS) \
+ V(S390_I16x8SubSatS) \
+ V(S390_I16x8AddSatU) \
+ V(S390_I16x8SubSatU) \
+ V(S390_I16x8RoundingAverageU) \
+ V(S390_I16x8Abs) \
+ V(S390_I16x8BitMask) \
+ V(S390_I16x8ExtMulLowI8x16S) \
+ V(S390_I16x8ExtMulHighI8x16S) \
+ V(S390_I16x8ExtMulLowI8x16U) \
+ V(S390_I16x8ExtMulHighI8x16U) \
+ V(S390_I16x8ExtAddPairwiseI8x16S) \
+ V(S390_I16x8ExtAddPairwiseI8x16U) \
+ V(S390_I16x8Q15MulRSatS) \
+ V(S390_I8x16Splat) \
+ V(S390_I8x16ExtractLaneU) \
+ V(S390_I8x16ExtractLaneS) \
+ V(S390_I8x16ReplaceLane) \
+ V(S390_I8x16Add) \
+ V(S390_I8x16Sub) \
+ V(S390_I8x16MinS) \
+ V(S390_I8x16MinU) \
+ V(S390_I8x16MaxS) \
+ V(S390_I8x16MaxU) \
+ V(S390_I8x16Eq) \
+ V(S390_I8x16Ne) \
+ V(S390_I8x16GtS) \
+ V(S390_I8x16GeS) \
+ V(S390_I8x16GtU) \
+ V(S390_I8x16GeU) \
+ V(S390_I8x16Shl) \
+ V(S390_I8x16ShrS) \
+ V(S390_I8x16ShrU) \
+ V(S390_I8x16Neg) \
+ V(S390_I8x16SConvertI16x8) \
+ V(S390_I8x16UConvertI16x8) \
+ V(S390_I8x16AddSatS) \
+ V(S390_I8x16SubSatS) \
+ V(S390_I8x16AddSatU) \
+ V(S390_I8x16SubSatU) \
+ V(S390_I8x16RoundingAverageU) \
+ V(S390_I8x16Abs) \
+ V(S390_I8x16BitMask) \
+ V(S390_I8x16Shuffle) \
+ V(S390_I8x16Swizzle) \
+ V(S390_I8x16Popcnt) \
+ V(S390_I64x2AllTrue) \
+ V(S390_I32x4AllTrue) \
+ V(S390_I16x8AllTrue) \
+ V(S390_I8x16AllTrue) \
+ V(S390_V128AnyTrue) \
+ V(S390_S128And) \
+ V(S390_S128Or) \
+ V(S390_S128Xor) \
+ V(S390_S128Const) \
+ V(S390_S128Zero) \
+ V(S390_S128AllOnes) \
+ V(S390_S128Not) \
+ V(S390_S128Select) \
+ V(S390_S128AndNot) \
+ V(S390_S128Load8Splat) \
+ V(S390_S128Load16Splat) \
+ V(S390_S128Load32Splat) \
+ V(S390_S128Load64Splat) \
+ V(S390_S128Load8x8S) \
+ V(S390_S128Load8x8U) \
+ V(S390_S128Load16x4S) \
+ V(S390_S128Load16x4U) \
+ V(S390_S128Load32x2S) \
+ V(S390_S128Load32x2U) \
+ V(S390_S128Load32Zero) \
+ V(S390_S128Load64Zero) \
+ V(S390_S128Load8Lane) \
+ V(S390_S128Load16Lane) \
+ V(S390_S128Load32Lane) \
+ V(S390_S128Load64Lane) \
+ V(S390_S128Store8Lane) \
+ V(S390_S128Store16Lane) \
+ V(S390_S128Store32Lane) \
+ V(S390_S128Store64Lane) \
+ V(S390_StoreSimd128) \
+ V(S390_LoadSimd128) \
+ V(S390_StoreCompressTagged) \
+ V(S390_LoadDecompressTaggedSigned) \
+ V(S390_LoadDecompressTaggedPointer) \
V(S390_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 489065e65f..120eaf41dc 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -689,9 +689,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
S390OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
void InstructionSelector::VisitLoad(Node* node, Node* value,
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 3e2298de3e..57e0143285 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -693,7 +693,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
+ const MemoryAccessMode access_mode = instr->memory_access_mode();
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
@@ -703,7 +703,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
- DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
+ DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1305,7 +1305,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
@@ -1317,10 +1318,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
+ __ CallCFunction(ref, num_gp_parameters + num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
+ __ CallCFunction(func, num_gp_parameters + num_fp_parameters);
}
__ bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -1360,13 +1361,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == rdx);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ int3();
@@ -2194,21 +2195,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kX64Float32Abs: {
- __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kX64Float32Neg: {
- __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kX64F64x2Abs:
case kX64Float64Abs: {
- __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kX64F64x2Neg:
case kX64Float64Neg: {
- __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kSSEFloat64SilenceNaN:
@@ -2702,7 +2707,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2PromoteLowF32x4: {
- __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ if (HasAddressingMode(instr)) {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ __ Cvtps2pd(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ }
break;
}
case kX64F32x4DemoteF64x2Zero: {
@@ -2817,42 +2827,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Min: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the resuls, and adjust.
- __ Movaps(kScratchDoubleReg, src1);
- __ Minps(kScratchDoubleReg, dst);
- __ Minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpunordps(dst, kScratchDoubleReg);
- __ Orps(kScratchDoubleReg, dst);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F32x4Max: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the resuls, and adjust.
- __ Movaps(kScratchDoubleReg, src1);
- __ Maxps(kScratchDoubleReg, dst);
- __ Maxps(dst, src1);
- // Find discrepancies.
- __ Xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpunordps(dst, kScratchDoubleReg);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F32x4Eq: {
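The open-coded kX64F32x4Min/kX64F32x4Max sequences above move into TurboAssembler helpers. The removed comments spell out the intent: minps/maxps alone do not propagate NaNs or treat ±0 the way Wasm requires, so the operation is run in both operand orders and the results merged, with NaNs canonicalized. A scalar sketch of the per-lane minimum semantics the helper is expected to preserve (ordinary C++, not the SIMD code itself):

#include <cmath>
#include <cstdio>
#include <limits>

// Wasm-style f32 minimum for one lane: a NaN in either input yields NaN,
// and min(-0.0f, +0.0f) is -0.0f. The emitted SIMD sequence gets this by
// performing minps in both orders and merging, as the removed comments note.
float WasmF32Min(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<float>::quiet_NaN();
  }
  if (a == 0.0f && b == 0.0f) return std::signbit(a) ? a : b;  // prefer -0
  return a < b ? a : b;
}

int main() {
  std::printf("%f\n", WasmF32Min(-0.0f, 0.0f));            // -0.000000
  std::printf("%d\n", std::isnan(WasmF32Min(1.0f, NAN)));  // 1
}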
@@ -2965,28 +2946,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister left = i.InputSimd128Register(0);
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp1 = i.TempSimd128Register(0);
- XMMRegister tmp2 = kScratchDoubleReg;
-
- __ Movdqa(tmp1, left);
- __ Movdqa(tmp2, right);
-
- // Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, byte{32});
- __ Pmuludq(tmp1, right);
-
- // Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, byte{32});
- __ Pmuludq(tmp2, left);
-
- __ Paddq(tmp2, tmp1);
- __ Psllq(tmp2, byte{32});
-
- __ Pmuludq(left, right);
- __ Paddq(left, tmp2); // left == dst
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.TempSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64I64x2Eq: {
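The removed kX64I64x2Mul expansion above documents the usual 64x64-to-64 multiply built from 32-bit pieces, since pmuludq only multiplies the low dwords of each qword: with a = a_hi*2^32 + a_lo and b = b_hi*2^32 + b_lo, the product modulo 2^64 is a_lo*b_lo + ((a_hi*b_lo + a_lo*b_hi) << 32). A scalar check of that identity, independent of the SIMD registers involved:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Per-lane equivalent of the pmuludq/psrlq/paddq sequence that the
// I64x2Mul helper now encapsulates.
uint64_t Mul64From32x32(uint64_t a, uint64_t b) {
  uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
  uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
  uint64_t cross = a_hi * b_lo + a_lo * b_hi;  // the two mixed 32x32 products
  return a_lo * b_lo + (cross << 32);          // a_hi*b_hi*2^64 vanishes mod 2^64
}

int main() {
  uint64_t a = 0x123456789ABCDEF0ull, b = 0x0FEDCBA987654321ull;
  assert(Mul64From32x32(a, b) == a * b);  // agrees with the native multiply mod 2^64
  std::printf("%016llx\n", static_cast<unsigned long long>(Mul64From32x32(a, b)));
}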
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index e7fe45c5de..ad9906585c 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -11,389 +11,394 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X64Add) \
- V(X64Add32) \
- V(X64And) \
- V(X64And32) \
- V(X64Cmp) \
- V(X64Cmp32) \
- V(X64Cmp16) \
- V(X64Cmp8) \
- V(X64Test) \
- V(X64Test32) \
- V(X64Test16) \
- V(X64Test8) \
- V(X64Or) \
- V(X64Or32) \
- V(X64Xor) \
- V(X64Xor32) \
- V(X64Sub) \
- V(X64Sub32) \
- V(X64Imul) \
- V(X64Imul32) \
- V(X64ImulHigh32) \
- V(X64UmulHigh32) \
- V(X64Idiv) \
- V(X64Idiv32) \
- V(X64Udiv) \
- V(X64Udiv32) \
- V(X64Not) \
- V(X64Not32) \
- V(X64Neg) \
- V(X64Neg32) \
- V(X64Shl) \
- V(X64Shl32) \
- V(X64Shr) \
- V(X64Shr32) \
- V(X64Sar) \
- V(X64Sar32) \
- V(X64Rol) \
- V(X64Rol32) \
- V(X64Ror) \
- V(X64Ror32) \
- V(X64Lzcnt) \
- V(X64Lzcnt32) \
- V(X64Tzcnt) \
- V(X64Tzcnt32) \
- V(X64Popcnt) \
- V(X64Popcnt32) \
- V(X64Bswap) \
- V(X64Bswap32) \
- V(X64MFence) \
- V(X64LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEFloat32ToInt64) \
- V(SSEFloat64ToInt64) \
- V(SSEFloat32ToUint64) \
- V(SSEFloat64ToUint64) \
- V(SSEInt32ToFloat64) \
- V(SSEInt32ToFloat32) \
- V(SSEInt64ToFloat32) \
- V(SSEInt64ToFloat64) \
- V(SSEUint64ToFloat32) \
- V(SSEUint64ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEUint32ToFloat32) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Cmp) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Cmp) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(X64Float64Abs) \
- V(X64Float64Neg) \
- V(X64Float32Abs) \
- V(X64Float32Neg) \
- V(X64Movsxbl) \
- V(X64Movzxbl) \
- V(X64Movsxbq) \
- V(X64Movzxbq) \
- V(X64Movb) \
- V(X64Movsxwl) \
- V(X64Movzxwl) \
- V(X64Movsxwq) \
- V(X64Movzxwq) \
- V(X64Movw) \
- V(X64Movl) \
- V(X64Movsxlq) \
- V(X64MovqDecompressTaggedSigned) \
- V(X64MovqDecompressTaggedPointer) \
- V(X64MovqDecompressAnyTagged) \
- V(X64MovqCompressTagged) \
- V(X64Movq) \
- V(X64Movsd) \
- V(X64Movss) \
- V(X64Movdqu) \
- V(X64BitcastFI) \
- V(X64BitcastDL) \
- V(X64BitcastIF) \
- V(X64BitcastLD) \
- V(X64Lea32) \
- V(X64Lea) \
- V(X64Dec32) \
- V(X64Inc32) \
- V(X64Push) \
- V(X64Poke) \
- V(X64Peek) \
- V(X64F64x2Splat) \
- V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
- V(X64F64x2Abs) \
- V(X64F64x2Neg) \
- V(X64F64x2Sqrt) \
- V(X64F64x2Add) \
- V(X64F64x2Sub) \
- V(X64F64x2Mul) \
- V(X64F64x2Div) \
- V(X64F64x2Min) \
- V(X64F64x2Max) \
- V(X64F64x2Eq) \
- V(X64F64x2Ne) \
- V(X64F64x2Lt) \
- V(X64F64x2Le) \
- V(X64F64x2Qfma) \
- V(X64F64x2Qfms) \
- V(X64F64x2Pmin) \
- V(X64F64x2Pmax) \
- V(X64F64x2Round) \
- V(X64F64x2ConvertLowI32x4S) \
- V(X64F64x2ConvertLowI32x4U) \
- V(X64F64x2PromoteLowF32x4) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
- V(X64F32x4SConvertI32x4) \
- V(X64F32x4UConvertI32x4) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4Sqrt) \
- V(X64F32x4RecipApprox) \
- V(X64F32x4RecipSqrtApprox) \
- V(X64F32x4Add) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Div) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
- V(X64F32x4Qfma) \
- V(X64F32x4Qfms) \
- V(X64F32x4Pmin) \
- V(X64F32x4Pmax) \
- V(X64F32x4Round) \
- V(X64F32x4DemoteF64x2Zero) \
- V(X64I64x2Splat) \
- V(X64I64x2ExtractLane) \
- V(X64I64x2Abs) \
- V(X64I64x2Neg) \
- V(X64I64x2BitMask) \
- V(X64I64x2Shl) \
- V(X64I64x2ShrS) \
- V(X64I64x2Add) \
- V(X64I64x2Sub) \
- V(X64I64x2Mul) \
- V(X64I64x2Eq) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
- V(X64I64x2Ne) \
- V(X64I64x2ShrU) \
- V(X64I64x2ExtMulLowI32x4S) \
- V(X64I64x2ExtMulHighI32x4S) \
- V(X64I64x2ExtMulLowI32x4U) \
- V(X64I64x2ExtMulHighI32x4U) \
- V(X64I64x2SConvertI32x4Low) \
- V(X64I64x2SConvertI32x4High) \
- V(X64I64x2UConvertI32x4Low) \
- V(X64I64x2UConvertI32x4High) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
- V(X64I32x4SConvertF32x4) \
- V(X64I32x4SConvertI16x8Low) \
- V(X64I32x4SConvertI16x8High) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
- V(X64I32x4UConvertF32x4) \
- V(X64I32x4UConvertI16x8Low) \
- V(X64I32x4UConvertI16x8High) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I32x4Abs) \
- V(X64I32x4BitMask) \
- V(X64I32x4DotI16x8S) \
- V(X64I32x4ExtMulLowI16x8S) \
- V(X64I32x4ExtMulHighI16x8S) \
- V(X64I32x4ExtMulLowI16x8U) \
- V(X64I32x4ExtMulHighI16x8U) \
- V(X64I32x4ExtAddPairwiseI16x8S) \
- V(X64I32x4ExtAddPairwiseI16x8U) \
- V(X64I32x4TruncSatF64x2SZero) \
- V(X64I32x4TruncSatF64x2UZero) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneS) \
- V(X64I16x8SConvertI8x16Low) \
- V(X64I16x8SConvertI8x16High) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
- V(X64I16x8SConvertI32x4) \
- V(X64I16x8Add) \
- V(X64I16x8AddSatS) \
- V(X64I16x8Sub) \
- V(X64I16x8SubSatS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
- V(X64I16x8UConvertI8x16Low) \
- V(X64I16x8UConvertI8x16High) \
- V(X64I16x8ShrU) \
- V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSatU) \
- V(X64I16x8SubSatU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
- V(X64I16x8RoundingAverageU) \
- V(X64I16x8Abs) \
- V(X64I16x8BitMask) \
- V(X64I16x8ExtMulLowI8x16S) \
- V(X64I16x8ExtMulHighI8x16S) \
- V(X64I16x8ExtMulLowI8x16U) \
- V(X64I16x8ExtMulHighI8x16U) \
- V(X64I16x8ExtAddPairwiseI8x16S) \
- V(X64I16x8ExtAddPairwiseI8x16U) \
- V(X64I16x8Q15MulRSatS) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneS) \
- V(X64Pinsrb) \
- V(X64Pinsrw) \
- V(X64Pinsrd) \
- V(X64Pinsrq) \
- V(X64Pextrb) \
- V(X64Pextrw) \
- V(X64I8x16SConvertI16x8) \
- V(X64I8x16Neg) \
- V(X64I8x16Shl) \
- V(X64I8x16ShrS) \
- V(X64I8x16Add) \
- V(X64I8x16AddSatS) \
- V(X64I8x16Sub) \
- V(X64I8x16SubSatS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
- V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSatU) \
- V(X64I8x16SubSatU) \
- V(X64I8x16ShrU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
- V(X64I8x16RoundingAverageU) \
- V(X64I8x16Abs) \
- V(X64I8x16BitMask) \
- V(X64S128Const) \
- V(X64S128Zero) \
- V(X64S128AllOnes) \
- V(X64S128Not) \
- V(X64S128And) \
- V(X64S128Or) \
- V(X64S128Xor) \
- V(X64S128Select) \
- V(X64S128AndNot) \
- V(X64I8x16Swizzle) \
- V(X64I8x16Shuffle) \
- V(X64I8x16Popcnt) \
- V(X64S128Load8Splat) \
- V(X64S128Load16Splat) \
- V(X64S128Load32Splat) \
- V(X64S128Load64Splat) \
- V(X64S128Load8x8S) \
- V(X64S128Load8x8U) \
- V(X64S128Load16x4S) \
- V(X64S128Load16x4U) \
- V(X64S128Load32x2S) \
- V(X64S128Load32x2U) \
- V(X64S128Store32Lane) \
- V(X64S128Store64Lane) \
- V(X64Shufps) \
- V(X64S32x4Rotate) \
- V(X64S32x4Swizzle) \
- V(X64S32x4Shuffle) \
- V(X64S16x8Blend) \
- V(X64S16x8HalfShuffle1) \
- V(X64S16x8HalfShuffle2) \
- V(X64S8x16Alignr) \
- V(X64S16x8Dup) \
- V(X64S8x16Dup) \
- V(X64S16x8UnzipHigh) \
- V(X64S16x8UnzipLow) \
- V(X64S8x16UnzipHigh) \
- V(X64S8x16UnzipLow) \
- V(X64S64x2UnpackHigh) \
- V(X64S32x4UnpackHigh) \
- V(X64S16x8UnpackHigh) \
- V(X64S8x16UnpackHigh) \
- V(X64S64x2UnpackLow) \
- V(X64S32x4UnpackLow) \
- V(X64S16x8UnpackLow) \
- V(X64S8x16UnpackLow) \
- V(X64S8x16TransposeLow) \
- V(X64S8x16TransposeHigh) \
- V(X64S8x8Reverse) \
- V(X64S8x4Reverse) \
- V(X64S8x2Reverse) \
- V(X64V128AnyTrue) \
- V(X64I64x2AllTrue) \
- V(X64I32x4AllTrue) \
- V(X64I16x8AllTrue) \
- V(X64I8x16AllTrue) \
- V(X64Word64AtomicAddUint64) \
- V(X64Word64AtomicSubUint64) \
- V(X64Word64AtomicAndUint64) \
- V(X64Word64AtomicOrUint64) \
- V(X64Word64AtomicXorUint64) \
- V(X64Word64AtomicStoreWord64) \
- V(X64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(X64F64x2PromoteLowF32x4) \
+ V(X64Movb) \
+ V(X64Movdqu) \
+ V(X64Movl) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Movsxbl) \
+ V(X64Movsxbq) \
+ V(X64Movsxlq) \
+ V(X64Movsxwl) \
+ V(X64Movsxwq) \
+ V(X64Movw) \
+ V(X64Movzxbl) \
+ V(X64Movzxbq) \
+ V(X64Movzxwl) \
+ V(X64Movzxwq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
+ V(X64Pinsrb) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pinsrw) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane)
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Rol) \
+ V(X64Rol32) \
+ V(X64Ror) \
+ V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
+ V(X64MFence) \
+ V(X64LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
+ V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Cmp) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(X64Float64Abs) \
+ V(X64Float64Neg) \
+ V(X64Float32Abs) \
+ V(X64Float32Neg) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTaggedPointer) \
+ V(X64MovqDecompressAnyTagged) \
+ V(X64MovqCompressTagged) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
+ V(X64Lea32) \
+ V(X64Lea) \
+ V(X64Dec32) \
+ V(X64Inc32) \
+ V(X64Push) \
+ V(X64Poke) \
+ V(X64Peek) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
+ V(X64F64x2Add) \
+ V(X64F64x2Sub) \
+ V(X64F64x2Mul) \
+ V(X64F64x2Div) \
+ V(X64F64x2Min) \
+ V(X64F64x2Max) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
+ V(X64F64x2Pmin) \
+ V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4SConvertI32x4) \
+ V(X64F32x4UConvertI32x4) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Div) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
+ V(X64F32x4Pmin) \
+ V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
+ V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2ExtMulLowI32x4S) \
+ V(X64I64x2ExtMulHighI32x4S) \
+ V(X64I64x2ExtMulLowI32x4U) \
+ V(X64I64x2ExtMulHighI32x4U) \
+ V(X64I64x2SConvertI32x4Low) \
+ V(X64I64x2SConvertI32x4High) \
+ V(X64I64x2UConvertI32x4Low) \
+ V(X64I64x2UConvertI32x4High) \
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4SConvertF32x4) \
+ V(X64I32x4SConvertI16x8Low) \
+ V(X64I32x4SConvertI16x8High) \
+ V(X64I32x4Neg) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
+ V(X64I32x4UConvertF32x4) \
+ V(X64I32x4UConvertI16x8Low) \
+ V(X64I32x4UConvertI16x8High) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
+ V(X64I32x4Abs) \
+ V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
+ V(X64I32x4ExtMulLowI16x8S) \
+ V(X64I32x4ExtMulHighI16x8S) \
+ V(X64I32x4ExtMulLowI16x8U) \
+ V(X64I32x4ExtMulHighI16x8U) \
+ V(X64I32x4ExtAddPairwiseI16x8S) \
+ V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLaneS) \
+ V(X64I16x8SConvertI8x16Low) \
+ V(X64I16x8SConvertI8x16High) \
+ V(X64I16x8Neg) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8SConvertI32x4) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSatS) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSatS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
+ V(X64I16x8UConvertI8x16Low) \
+ V(X64I16x8UConvertI8x16High) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8UConvertI32x4) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
+ V(X64I16x8RoundingAverageU) \
+ V(X64I16x8Abs) \
+ V(X64I16x8BitMask) \
+ V(X64I16x8ExtMulLowI8x16S) \
+ V(X64I16x8ExtMulHighI8x16S) \
+ V(X64I16x8ExtMulLowI8x16U) \
+ V(X64I16x8ExtMulHighI8x16U) \
+ V(X64I16x8ExtAddPairwiseI8x16S) \
+ V(X64I16x8ExtAddPairwiseI8x16U) \
+ V(X64I16x8Q15MulRSatS) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLaneS) \
+ V(X64I8x16SConvertI16x8) \
+ V(X64I8x16Neg) \
+ V(X64I8x16Shl) \
+ V(X64I8x16ShrS) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSatS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSatS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
+ V(X64I8x16UConvertI16x8) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
+ V(X64I8x16ShrU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
+ V(X64I8x16RoundingAverageU) \
+ V(X64I8x16Abs) \
+ V(X64I8x16BitMask) \
+ V(X64S128Const) \
+ V(X64S128Zero) \
+ V(X64S128AllOnes) \
+ V(X64S128Not) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Select) \
+ V(X64S128AndNot) \
+ V(X64I8x16Swizzle) \
+ V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
+ V(X64Shufps) \
+ V(X64S32x4Rotate) \
+ V(X64S32x4Swizzle) \
+ V(X64S32x4Shuffle) \
+ V(X64S16x8Blend) \
+ V(X64S16x8HalfShuffle1) \
+ V(X64S16x8HalfShuffle2) \
+ V(X64S8x16Alignr) \
+ V(X64S16x8Dup) \
+ V(X64S8x16Dup) \
+ V(X64S16x8UnzipHigh) \
+ V(X64S16x8UnzipLow) \
+ V(X64S8x16UnzipHigh) \
+ V(X64S8x16UnzipLow) \
+ V(X64S64x2UnpackHigh) \
+ V(X64S32x4UnpackHigh) \
+ V(X64S16x8UnpackHigh) \
+ V(X64S8x16UnpackHigh) \
+ V(X64S64x2UnpackLow) \
+ V(X64S32x4UnpackLow) \
+ V(X64S16x8UnpackLow) \
+ V(X64S8x16UnpackLow) \
+ V(X64S8x16TransposeLow) \
+ V(X64S8x16TransposeHigh) \
+ V(X64S8x8Reverse) \
+ V(X64S8x4Reverse) \
+ V(X64S8x2Reverse) \
+ V(X64V128AnyTrue) \
+ V(X64I64x2AllTrue) \
+ V(X64I32x4AllTrue) \
+ V(X64I16x8AllTrue) \
+ V(X64I8x16AllTrue) \
+ V(X64Word64AtomicAddUint64) \
+ V(X64Word64AtomicSubUint64) \
+ V(X64Word64AtomicAndUint64) \
+ V(X64Word64AtomicOrUint64) \
+ V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicStoreWord64) \
+ V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
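For readers unfamiliar with the X-macro style used by these opcode lists: a list such as TARGET_ARCH_OPCODE_LIST takes a macro V and invokes it once per entry, so the same list can be expanded into an enum, a name table, and similar structures without the copies drifting apart. Splitting out TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST and including it at the top of TARGET_ARCH_OPCODE_LIST keeps the memory-access-mode opcodes grouped while the combined list still covers every opcode. A minimal, self-contained sketch of the idiom, using invented names rather than V8's:

#include <cstdio>

// Hypothetical opcode list in the same X-macro style as the lists above.
#define DEMO_OPCODE_LIST(V) \
  V(DemoAdd)                \
  V(DemoSub)                \
  V(DemoMul)

// One expansion produces the enum...
enum DemoOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// ...another produces a matching name table, so the two stay in sync.
const char* DemoOpcodeName(DemoOpcode opcode) {
  switch (opcode) {
#define OPCODE_CASE(Name) \
  case k##Name:           \
    return #Name;
    DEMO_OPCODE_LIST(OPCODE_CASE)
#undef OPCODE_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", DemoOpcodeName(kDemoMul)); }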
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 2f44f0dee5..c477c44b07 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -16,6 +16,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
#include "src/roots/roots-inl.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -376,9 +377,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
X64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
void InstructionSelector::VisitLoadLane(Node* node) {
@@ -1006,15 +1007,15 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
kPositiveDisplacement);
return;
} else {
- Int64BinopMatcher m(node);
- if ((m.left().IsChangeInt32ToInt64() ||
- m.left().IsChangeUint32ToUint64()) &&
- m.right().IsInRange(32, 63)) {
+ Int64BinopMatcher bm(node);
+ if ((bm.left().IsChangeInt32ToInt64() ||
+ bm.left().IsChangeUint32ToUint64()) &&
+ bm.right().IsInRange(32, 63)) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kX64Shl, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()->InputAt(0)),
- g.UseImmediate(m.right().node()));
+ g.UseRegister(bm.left().node()->InputAt(0)),
+ g.UseImmediate(bm.right().node()));
return;
}
}
@@ -2434,19 +2435,19 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
// Try to combine the branch with a comparison.
- Node* const user = m.node();
- Node* const value = m.left().node();
- if (CanCover(user, value)) {
- switch (value->opcode()) {
+ Node* const eq_user = m.node();
+ Node* const eq_value = m.left().node();
+ if (CanCover(eq_user, eq_value)) {
+ switch (eq_value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, value, kX64Cmp, cont);
+ return VisitWordCompare(this, eq_value, kX64Cmp, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(this, value, kX64Test, cont);
+ return VisitWordCompare(this, eq_value, kX64Test, cont);
default:
break;
}
}
- return VisitCompareZero(this, user, value, kX64Cmp, cont);
+ return VisitCompareZero(this, eq_user, eq_value, kX64Cmp, cont);
}
return VisitWord64EqualImpl(this, value, cont);
}
@@ -3040,7 +3041,6 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_UNOP_LIST(V) \
V(F64x2Sqrt) \
V(F64x2ConvertLowI32x4S) \
- V(F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
@@ -3842,6 +3842,26 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
}
}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionCode code = kX64F64x2PromoteLowF32x4;
+ Node* input = node->InputAt(0);
+ LoadTransformMatcher m(input);
+
+ if (m.Is(LoadTransformation::kS128Load64Zero) && CanCover(node, input)) {
+ if (m.ResolvedValue().kind == MemoryAccessKind::kProtected) {
+ code |= AccessModeField::encode(kMemoryAccessProtected);
+ }
+ // LoadTransforms cannot be eliminated, so they are visited even if
+ // unused. Mark it as defined so that we don't visit it.
+ MarkAsDefined(input);
+ VisitLoad(node, input, code);
+ return;
+ }
+
+ VisitRR(this, node, code);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
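In VisitF64x2PromoteLowF32x4 above, `code |= AccessModeField::encode(kMemoryAccessProtected)` packs the memory-access mode into spare bits of the InstructionCode so the code generator can recover it later. A rough standalone sketch of that bit-field pattern follows; the field width, shift, and names are assumptions chosen for illustration, not V8's actual encoding:

#include <cassert>
#include <cstdint>

using InstructionCode = std::uint32_t;

// Hypothetical mode values standing in for V8's MemoryAccessMode.
enum MemoryAccessMode { kMemoryAccessDirect = 0, kMemoryAccessProtected = 1 };

// Simplified stand-in for base::BitField: packs a small value into bits
// [kShift, kShift + kBits) of an InstructionCode.
template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr InstructionCode kMask =
      ((InstructionCode{1} << kBits) - 1) << kShift;
  static InstructionCode encode(T value) {
    return static_cast<InstructionCode>(value) << kShift;
  }
  static T decode(InstructionCode code) {
    return static_cast<T>((code & kMask) >> kShift);
  }
};

// Assumed layout for illustration only: two mode bits starting at bit 24.
using AccessModeField = BitField<MemoryAccessMode, 24, 2>;

int main() {
  InstructionCode code = 42;  // stands in for some opcode value
  code |= AccessModeField::encode(kMemoryAccessProtected);
  assert(AccessModeField::decode(code) == kMemoryAccessProtected);
}

The selector only sets the field when the fused load is a protected memory access, which is also why the new opcode is listed in TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST above.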
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 1515340503..d2fce8a276 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -5,6 +5,7 @@
#include "src/compiler/branch-elimination.h"
#include "src/base/small-vector.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -14,12 +15,15 @@ namespace internal {
namespace compiler {
BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
- Zone* zone, Phase phase)
+ Zone* zone,
+ SourcePositionTable* source_positions,
+ Phase phase)
: AdvancedReducer(editor),
jsgraph_(js_graph),
node_conditions_(js_graph->graph()->NodeCount(), zone),
reduced_(js_graph->graph()->NodeCount(), zone),
zone_(zone),
+ source_positions_(source_positions),
dead_(js_graph->Dead()),
phase_(phase) {}
@@ -158,6 +162,72 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
return TakeConditionsFromFirstControl(node);
}
+// Simplify a trap following a merge.
+// Assuming condition is in control1's path conditions, and !condition is in
+// control2's path conditions, the following transformation takes place:
+//
+// control1 control2 condition effect1
+// \ / \ / |
+// Merge X | control1
+// | / \ | /
+// effect1 effect2 | | TrapIf control2
+// \ | /| ==> | \ /
+// EffectPhi | | effect2 Merge
+// | / | | /
+// condition | / \ | /
+// \ | / EffectPhi
+// TrapIf
+// TODO(manoskouk): We require that the trap's effect input is the Merge's
+// EffectPhi, so we can ensure that the new traps' effect inputs are not
+// dominated by the Merge. Can we relax this?
+bool BranchElimination::TryPullTrapIntoMerge(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kTrapIf ||
+ node->opcode() == IrOpcode::kTrapUnless);
+ Node* merge = NodeProperties::GetControlInput(node);
+ DCHECK_EQ(merge->opcode(), IrOpcode::kMerge);
+ Node* condition = NodeProperties::GetValueInput(node, 0);
+ Node* effect_input = NodeProperties::GetEffectInput(node);
+ if (!(effect_input->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(effect_input) == merge)) {
+ return false;
+ }
+
+ bool trapping_condition = node->opcode() == IrOpcode::kTrapIf;
+ base::SmallVector<Node*, 8> new_merge_inputs;
+ for (Edge edge : merge->input_edges()) {
+ Node* input = edge.to();
+ ControlPathConditions from_input = node_conditions_.Get(input);
+ Node* previous_branch;
+ bool condition_value;
+ if (!from_input.LookupCondition(condition, &previous_branch,
+ &condition_value)) {
+ return false;
+ }
+ if (condition_value == trapping_condition) {
+ Node* inputs[] = {
+ condition, NodeProperties::GetEffectInput(effect_input, edge.index()),
+ input};
+ Node* trap_clone = graph()->NewNode(node->op(), 3, inputs);
+ if (source_positions_) {
+ source_positions_->SetSourcePosition(
+ trap_clone, source_positions_->GetSourcePosition(node));
+ }
+ new_merge_inputs.emplace_back(trap_clone);
+ } else {
+ new_merge_inputs.emplace_back(input);
+ }
+ }
+
+ for (int i = 0; i < merge->InputCount(); i++) {
+ merge->ReplaceInput(i, new_merge_inputs[i]);
+ }
+ ReplaceWithValue(node, dead(), dead(), merge);
+ node->Kill();
+ Revisit(merge);
+
+ return true;
+}
+
Reduction BranchElimination::ReduceTrapConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kTrapIf ||
node->opcode() == IrOpcode::kTrapUnless);
@@ -167,17 +237,59 @@ Reduction BranchElimination::ReduceTrapConditional(Node* node) {
// If we do not know anything about the predecessor, do not propagate just
// yet because we will have to recompute anyway once we compute the
// predecessor.
- if (!reduced_.Get(control_input)) {
- return NoChange();
+ if (!reduced_.Get(control_input)) return NoChange();
+
+ // If the trap comes directly after a merge, pull it into the merge. This will
+ // unlock other optimizations later.
+ if (control_input->opcode() == IrOpcode::kMerge &&
+ TryPullTrapIntoMerge(node)) {
+ return Replace(dead());
}
+
ControlPathConditions from_input = node_conditions_.Get(control_input);
- Node* branch;
+ Node* previous_branch;
bool condition_value;
- if (from_input.LookupCondition(condition, &branch, &condition_value)) {
+ if (from_input.LookupCondition(condition, &previous_branch,
+ &condition_value)) {
if (condition_value == trapping_condition) {
- // This will always trap. Mark its outputs as dead and connect it to
- // graph()->end().
+ // Special case: Trap directly inside a branch without sibling nodes.
+ // Replace the branch with the trap.
+ // condition control condition control
+ // | \ / \ /
+ // | Branch TrapIf
+ // | / \ ==> |
+ // | IfTrue IfFalse <subgraph2>
+ // | / |
+ //   TrapIf     <subgraph2>     Dead
+ // | |
+ // <subgraph1> <subgraph1>
+ // (and symmetrically for TrapUnless.)
+ if ((control_input->opcode() == IrOpcode::kIfTrue ||
+ control_input->opcode() == IrOpcode::kIfFalse) &&
+ control_input->UseCount() == 1) {
+ Node* branch = NodeProperties::GetControlInput(control_input);
+ DCHECK_EQ(branch->opcode(), IrOpcode::kBranch);
+ if (condition == NodeProperties::GetValueInput(branch, 0)) {
+ Node* other_if_branch = nullptr;
+ for (Node* use : branch->uses()) {
+ if (use != control_input) other_if_branch = use;
+ }
+ DCHECK_NOT_NULL(other_if_branch);
+
+ node->ReplaceInput(NodeProperties::FirstControlIndex(node),
+ NodeProperties::GetControlInput(branch));
+ ReplaceWithValue(node, dead(), dead(), dead());
+ ReplaceWithValue(other_if_branch, node, node, node);
+ other_if_branch->Kill();
+ control_input->Kill();
+ branch->Kill();
+ return Changed(node);
+ }
+ }
+
+ // General case: This will always trap. Mark its outputs as dead and
+ // connect it to graph()->end().
ReplaceWithValue(node, dead(), dead(), dead());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = graph()->NewNode(common()->Throw(), effect, node);
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 93bacbff7b..7964e0a1b9 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -19,6 +19,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class JSGraph;
+class SourcePositionTable;
class V8_EXPORT_PRIVATE BranchElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -28,7 +29,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
kLATE,
};
BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone,
- Phase phase = kLATE);
+ SourcePositionTable* source_positions, Phase phase = kLATE);
~BranchElimination() final;
const char* reducer_name() const override { return "BranchElimination"; }
@@ -108,6 +109,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction ReduceStart(Node* node);
Reduction ReduceOtherControl(Node* node);
void SimplifyBranchCondition(Node* branch);
+ bool TryPullTrapIntoMerge(Node* node);
Reduction TakeConditionsFromFirstControl(Node* node);
Reduction UpdateConditions(Node* node, ControlPathConditions conditions);
@@ -131,6 +133,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
node_conditions_;
NodeAuxData<bool> reduced_;
Zone* zone_;
+ SourcePositionTable* source_positions_;
Node* dead_;
Phase phase_;
};
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index e62babccf1..95a84ceeab 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -66,6 +66,8 @@ namespace {
// == arm64 ====================================================================
// ===========================================================================
#define PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTER d0
#define CALLEE_SAVE_REGISTERS \
(1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) | \
(1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) | \
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index d27744072a..a723d21a10 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -245,9 +245,9 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
base::EmbeddedVector<char, 1024> message;
SNPrintF(message, "no Object: %s", location);
TNode<String> message_node = StringConstant(message.begin());
- // This somewhat misuses the AbortCSAAssert runtime function. This will print
- // "abort: CSA_ASSERT failed: <message>", which is good enough.
- AbortCSAAssert(message_node);
+ // This somewhat misuses the AbortCSADcheck runtime function. This will print
+ // "abort: CSA_DCHECK failed: <message>", which is good enough.
+ AbortCSADcheck(message_node);
Unreachable();
Bind(&ok);
}
@@ -503,8 +503,8 @@ void CodeAssembler::ReturnIf(TNode<BoolT> condition, TNode<Object> value) {
Bind(&if_continue);
}
-void CodeAssembler::AbortCSAAssert(Node* message) {
- raw_assembler()->AbortCSAAssert(message);
+void CodeAssembler::AbortCSADcheck(Node* message) {
+ raw_assembler()->AbortCSADcheck(message);
}
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 7a22086260..fcef5bdd72 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -631,7 +631,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void ReturnIf(TNode<BoolT> condition, TNode<Object> value);
- void AbortCSAAssert(Node* message);
+ void AbortCSADcheck(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const char* msg) {
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index b5df8b542b..ece79a7156 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -46,7 +46,7 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
case IrOpcode::kStoreToObject:
return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
case IrOpcode::kDebugBreak:
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
// Avoid changing optimizations in the presence of debug instructions.
return PropagateInputState(node);
case IrOpcode::kCall:
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 83eb6c215c..9d000724b5 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -5165,6 +5165,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
Node* value_is_smi = ObjectIsSmi(node);
__ GotoIf(value_is_smi, if_error);
+ ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;
+
switch (arg_type.GetSequenceType()) {
case CTypeInfo::SequenceType::kIsSequence: {
CHECK_EQ(arg_type.GetType(), CTypeInfo::Type::kVoid);
@@ -5185,8 +5187,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
kNoWriteBarrier),
stack_slot, 0, node);
- Node* target_address = __ ExternalConstant(
- ExternalReference::Create(c_functions[func_index].address));
+ Node* target_address = __ ExternalConstant(ExternalReference::Create(
+ c_functions[func_index].address, ref_type));
__ Goto(&merge, target_address, stack_slot);
break;
}
@@ -5199,8 +5201,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
fast_api_call::GetTypedArrayElementsKind(
overloads_resolution_result.element_type),
&next);
- Node* target_address = __ ExternalConstant(
- ExternalReference::Create(c_functions[func_index].address));
+ Node* target_address = __ ExternalConstant(ExternalReference::Create(
+ c_functions[func_index].address, ref_type));
__ Goto(&merge, target_address, stack_slot);
break;
}
@@ -5387,6 +5389,8 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
Node** const inputs = graph()->zone()->NewArray<Node*>(
kFastTargetAddressInputCount + c_arg_count + n.FastCallExtraInputCount());
+ ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;
+
// The inputs to {Call} node for the fast call look like:
// [fast callee, receiver, ... C arguments, [optional Options], effect,
// control].
@@ -5398,7 +5402,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
// with a Phi node created by AdaptOverloadedFastCallArgument.
inputs[kFastTargetAddressInputIndex] =
(c_functions.size() == 1) ? __ ExternalConstant(ExternalReference::Create(
- c_functions[0].address))
+ c_functions[0].address, ref_type))
: nullptr;
for (int i = 0; i < c_arg_count; ++i) {
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 7ff6ab684f..bf693c71dc 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -510,12 +510,15 @@ int OffsetOfFieldAccess(const Operator* op) {
return access.offset;
}
-int OffsetOfElementAt(ElementAccess const& access, int index) {
+Maybe<int> OffsetOfElementAt(ElementAccess const& access, int index) {
+ MachineRepresentation representation = access.machine_type.representation();
+ // Double elements accesses are not yet supported. See chromium:1237821.
+ if (representation == MachineRepresentation::kFloat64) return Nothing<int>();
+
DCHECK_GE(index, 0);
- DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kTaggedSizeLog2);
- return access.header_size +
- (index << ElementSizeLog2Of(access.machine_type.representation()));
+ DCHECK_GE(ElementSizeLog2Of(representation), kTaggedSizeLog2);
+ return Just(access.header_size +
+ (index << ElementSizeLog2Of(representation)));
}
Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
@@ -527,7 +530,7 @@ Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
double min = index_type.Min();
int index = static_cast<int>(min);
if (index < 0 || index != min || index != max) return Nothing<int>();
- return Just(OffsetOfElementAt(ElementAccessOf(op), index));
+ return OffsetOfElementAt(ElementAccessOf(op), index);
}
Node* LowerCompareMapsWithoutLoad(Node* checked_map,
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 907c7cc087..d3f9768fe7 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -139,6 +139,11 @@ class VirtualObject : public Dependable {
}
return Just(fields_.at(offset / kTaggedSize));
}
+ Maybe<Variable> FieldAt(Maybe<int> maybe_offset) const {
+ int offset;
+ if (!maybe_offset.To(&offset)) return Nothing<Variable>();
+ return FieldAt(offset);
+ }
Id id() const { return id_; }
int size() const { return static_cast<int>(kTaggedSize * fields_.size()); }
// Escaped might mean that the object escaped to untracked memory or that it
diff --git a/deps/v8/src/compiler/globals.h b/deps/v8/src/compiler/globals.h
index 392cb23917..23f834cd6c 100644
--- a/deps/v8/src/compiler/globals.h
+++ b/deps/v8/src/compiler/globals.h
@@ -92,7 +92,8 @@ const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
// to add support for IA32, because it has a totally different approach
// (using FP stack). As support is added to more platforms, please make sure
// to list them here in order to enable tests of this functionality.
-#if defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64) || \
+ (defined(V8_TARGET_ARCH_ARM64) && !defined(USE_SIMULATOR))
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index c246430de2..19c7bd1ef6 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -1272,7 +1272,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
boilerplate->map().instance_descriptors(isolate), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
+ if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
@@ -1780,11 +1780,6 @@ MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
object()->FindFieldOwner(broker()->isolate(), descriptor_index));
}
-ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetFieldType(descriptor_index);
-}
-
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
uint32_t index) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
@@ -2605,12 +2600,6 @@ NameRef DescriptorArrayRef::GetPropertyKey(
return result;
}
-ObjectRef DescriptorArrayRef::GetFieldType(
- InternalIndex descriptor_index) const {
- return MakeRef(broker(),
- Object::cast(object()->GetFieldType(descriptor_index)));
-}
-
base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
InternalIndex descriptor_index) const {
HeapObject heap_object;
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 4644071ea5..7f737c0c26 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -603,7 +603,6 @@ class DescriptorArrayRef : public HeapObjectRef {
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
- ObjectRef GetFieldType(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_index) const;
};
@@ -742,7 +741,6 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
- ObjectRef GetFieldType(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 91197ead1e..de8dcfacba 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4451,8 +4451,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
- FeedbackCellRef feedback_cell =
- MakeRef(broker(), feedback_target.value().AsFeedbackCell().object());
+ FeedbackCellRef feedback_cell = feedback_target.value().AsFeedbackCell();
// TODO(neis): This check seems unnecessary.
if (feedback_cell.feedback_vector().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
@@ -5951,9 +5950,13 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
Effect effect = n.effect();
Control control = n.control();
- // Optimize for the case where we simply clone the {receiver},
- // i.e. when the {start} is zero and the {end} is undefined
- // (meaning it will be set to {receiver}s "length" property).
+ // Optimize for the case where we simply clone the {receiver}, i.e. when the
+ // {start} is zero and the {end} is undefined (meaning it will be set to
+ // {receiver}'s "length" property). This logic should be in sync with
+ // ReduceArrayPrototypeSlice (to a reasonable degree). This is because
+ // CloneFastJSArray produces arrays which are potentially COW. If there's a
+ // discrepancy, TF generates code which produces a COW array and then expects
+ // it to be non-COW (or the other way around) -> immediate deopt.
if (!NumberMatcher(start).Is(0) ||
!HeapObjectMatcher(end).Is(factory()->undefined_value())) {
return NoChange();
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 60c9017fc2..1b79b9d786 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -1711,7 +1711,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
for (InternalIndex i : InternalIndex::Range(boilerplate_nof)) {
PropertyDetails const property_details =
boilerplate_map.GetPropertyDetails(i);
- if (property_details.location() != kField) continue;
+ if (property_details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, property_details.kind());
if ((*max_properties)-- == 0) return {};
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index deb8345bf7..b2e012d8c4 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -472,11 +472,24 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Determine the call target.
base::Optional<SharedFunctionInfoRef> shared_info(DetermineCallTarget(node));
if (!shared_info.has_value()) return NoChange();
- DCHECK(shared_info->IsInlineable());
SharedFunctionInfoRef outer_shared_info =
MakeRef(broker(), info_->shared_info());
+ SharedFunctionInfo::Inlineability inlineability =
+ shared_info->GetInlineability();
+ if (inlineability != SharedFunctionInfo::kIsInlineable) {
+ // The function is no longer inlineable. The only way this can happen is if
+ // the function had its optimization disabled in the meantime, e.g. because
+ // another optimization job failed too often.
+ CHECK_EQ(inlineability, SharedFunctionInfo::kHasOptimizationDisabled);
+ TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
+ << " because it had its optimization disabled.");
+ return NoChange();
+ }
+ // NOTE: Even though we bail out in the kHasOptimizationDisabled case above, we
+ // won't notice if the function's optimization is disabled after this point.
+
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSConstruct &&
!IsConstructable(shared_info->kind())) {
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index cdbc4848cc..d100fd91af 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -1744,10 +1744,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
}
- for (ElementAccessInfo const& access_info : access_infos) {
- if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
- }
-
// Check for the monomorphic case.
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_infos.size() == 1) {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 956f13d7f9..38c523596c 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -153,6 +153,7 @@ class JSSpeculativeBinopBuilder final {
}
const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) {
+ DCHECK(jsgraph()->machine()->Is64());
switch (op_->opcode()) {
case IrOpcode::kJSAdd:
return simplified()->SpeculativeBigIntAdd(hint);
@@ -206,6 +207,7 @@ class JSSpeculativeBinopBuilder final {
}
Node* TryBuildBigIntBinop() {
+ DCHECK(jsgraph()->machine()->Is64());
BigIntOperationHint hint;
if (GetBinaryBigIntOperationHint(&hint)) {
const Operator* op = SpeculativeBigIntOp(hint);
@@ -321,10 +323,13 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
jsgraph()->SmiConstant(-1), effect, control, slot);
node = b.TryBuildNumberBinop();
if (!node) {
- if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
- const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate(
- BigIntOperationHint::kBigInt);
- node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ if (jsgraph()->machine()->Is64()) {
+ if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
+ const Operator* op =
+ jsgraph()->simplified()->SpeculativeBigIntNegate(
+ BigIntOperationHint::kBigInt);
+ node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ }
}
}
break;
@@ -403,8 +408,10 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
if (op->opcode() == IrOpcode::kJSAdd ||
op->opcode() == IrOpcode::kJSSubtract) {
- if (Node* node = b.TryBuildBigIntBinop()) {
- return LoweringResult::SideEffectFree(node, node, control);
+ if (jsgraph()->machine()->Is64()) {
+ if (Node* node = b.TryBuildBigIntBinop()) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ }
}
}
break;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index fec0040b61..2197fe6a65 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -208,6 +208,18 @@ int CallDescriptor::CalculateFixedFrameSize(CodeKind code_kind) const {
UNREACHABLE();
}
+void CallDescriptor::ComputeParamCounts() const {
+ gp_param_count_ = 0;
+ fp_param_count_ = 0;
+ for (size_t i = 0; i < ParameterCount(); ++i) {
+ if (IsFloatingPoint(GetParameterType(i).representation())) {
+ ++fp_param_count_.value();
+ } else {
+ ++gp_param_count_.value();
+ }
+ }
+}
+
CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
OptimizedCompilationInfo* info) {
#if V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 707c7d98ab..d157b44e03 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -305,9 +305,27 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of return values from this call.
size_t ReturnCount() const { return location_sig_->return_count(); }
- // The number of C parameters to this call.
+ // The number of C parameters to this call. The following invariant
+ // should hold true:
+ // ParameterCount() == GPParameterCount() + FPParameterCount()
size_t ParameterCount() const { return location_sig_->parameter_count(); }
+ // The number of general purpose C parameters to this call.
+ size_t GPParameterCount() const {
+ if (!gp_param_count_) {
+ ComputeParamCounts();
+ }
+ return gp_param_count_.value();
+ }
+
+ // The number of floating point C parameters to this call.
+ size_t FPParameterCount() const {
+ if (!fp_param_count_) {
+ ComputeParamCounts();
+ }
+ return fp_param_count_.value();
+ }
+
// The number of stack parameter slots to the call.
size_t ParameterSlotCount() const { return param_slot_count_; }
@@ -417,6 +435,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
}
private:
+ void ComputeParamCounts() const;
+
friend class Linkage;
const Kind kind_;
@@ -434,6 +454,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const Flags flags_;
const StackArgumentOrder stack_order_;
const char* const debug_name_;
+
+ mutable base::Optional<size_t> gp_param_count_;
+ mutable base::Optional<size_t> fp_param_count_;
};
DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
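The GPParameterCount()/FPParameterCount() accessors above compute both counts lazily on first use and cache them in mutable base::Optional members, so callers that never need the split pay nothing. A small self-contained sketch of the same pattern, using std::optional and an invented descriptor type purely for illustration:

#include <cstddef>
#include <optional>
#include <utility>
#include <vector>

// Toy descriptor: each entry records whether that parameter is floating point.
class ToyCallDescriptor {
 public:
  explicit ToyCallDescriptor(std::vector<bool> is_fp)
      : is_fp_(std::move(is_fp)) {}

  std::size_t ParameterCount() const { return is_fp_.size(); }

  // Computed lazily on first use, then cached, mirroring linkage.h above.
  std::size_t GPParameterCount() const {
    if (!gp_param_count_) ComputeParamCounts();
    return *gp_param_count_;
  }
  std::size_t FPParameterCount() const {
    if (!fp_param_count_) ComputeParamCounts();
    return *fp_param_count_;
  }

 private:
  void ComputeParamCounts() const {
    std::size_t gp = 0, fp = 0;
    for (bool is_fp_param : is_fp_) {
      if (is_fp_param) {
        ++fp;
      } else {
        ++gp;
      }
    }
    gp_param_count_ = gp;
    fp_param_count_ = fp;
  }

  std::vector<bool> is_fp_;
  mutable std::optional<std::size_t> gp_param_count_;
  mutable std::optional<std::size_t> fp_param_count_;
};

Because every parameter is counted exactly once, the invariant stated in the header, ParameterCount() == GPParameterCount() + FPParameterCount(), holds by construction.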
diff --git a/deps/v8/src/compiler/loop-unrolling.cc b/deps/v8/src/compiler/loop-unrolling.cc
index 973bb7af19..357b17a3ec 100644
--- a/deps/v8/src/compiler/loop-unrolling.cc
+++ b/deps/v8/src/compiler/loop-unrolling.cc
@@ -35,11 +35,11 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
NodeVector copies(tmp_zone);
NodeCopier copier(graph, copied_size, &copies, unrolling_count);
- {
- copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
- base::make_iterator_range(loop->begin(), loop->end()),
- source_positions, node_origins);
- }
+ source_positions->AddDecorator();
+ copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
+ base::make_iterator_range(loop->begin(), loop->end()),
+ source_positions, node_origins);
+ source_positions->RemoveDecorator();
#define COPY(node, n) copier.map(node, n)
#define FOREACH_COPY_INDEX(i) for (uint32_t i = 0; i < unrolling_count; i++)
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index fedb208b5f..31f0526679 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -543,7 +543,7 @@ class MachineRepresentationChecker {
case IrOpcode::kParameter:
case IrOpcode::kProjection:
break;
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 775e5ada81..db137dfeb4 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -1254,17 +1254,12 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
NodeMatcher nm(node);
- MachineRepresentation rep;
- int value_input;
- if (nm.IsStore()) {
- rep = StoreRepresentationOf(node->op()).representation();
- value_input = 2;
- } else {
- DCHECK(nm.IsUnalignedStore());
- rep = UnalignedStoreRepresentationOf(node->op());
- value_input = 2;
- }
+ DCHECK(nm.IsStore() || nm.IsUnalignedStore());
+ MachineRepresentation rep =
+ nm.IsStore() ? StoreRepresentationOf(node->op()).representation()
+ : UnalignedStoreRepresentationOf(node->op());
+ const int value_input = 2;
Node* const value = node->InputAt(value_input);
switch (value->opcode()) {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index d24030e1a7..e2d1686d5d 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -1242,12 +1242,12 @@ struct MachineOperatorGlobalCache {
};
BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
- struct AbortCSAAssertOperator : public Operator {
- AbortCSAAssertOperator()
- : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
- "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
+ struct AbortCSADcheckOperator : public Operator {
+ AbortCSADcheckOperator()
+ : Operator(IrOpcode::kAbortCSADcheck, Operator::kNoThrow,
+ "AbortCSADcheck", 1, 1, 1, 0, 1, 0) {}
};
- AbortCSAAssertOperator kAbortCSAAssert;
+ AbortCSADcheckOperator kAbortCSADcheck;
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
@@ -1626,8 +1626,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
return &cache_.kBitcastMaybeObjectToWord;
}
-const Operator* MachineOperatorBuilder::AbortCSAAssert() {
- return &cache_.kAbortCSAAssert;
+const Operator* MachineOperatorBuilder::AbortCSADcheck() {
+ return &cache_.kAbortCSADcheck;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 7bd73663ab..493ea08ac1 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -119,6 +119,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE LoadTransformParameters const& LoadTransformParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE bool operator==(LoadTransformParameters,
+ LoadTransformParameters);
+bool operator!=(LoadTransformParameters, LoadTransformParameters);
+
struct LoadLaneParameters {
MemoryAccessKind kind;
LoadRepresentation rep;
@@ -404,7 +408,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
MachineOperatorBuilder& operator=(const MachineOperatorBuilder&) = delete;
const Operator* Comment(const char* msg);
- const Operator* AbortCSAAssert();
+ const Operator* AbortCSADcheck();
const Operator* DebugBreak();
const Operator* UnsafePointerAdd();
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index ba4a5c1f67..a92dd67c62 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -22,7 +22,7 @@ namespace {
bool CanAllocate(const Node* node) {
switch (node->opcode()) {
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kComment:
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 52dc476dc4..86e4884421 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -16,6 +16,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/objects/heap-object.h"
@@ -816,6 +817,14 @@ struct V8_EXPORT_PRIVATE DiamondMatcher
Node* if_false_;
};
+struct LoadTransformMatcher
+ : ValueMatcher<LoadTransformParameters, IrOpcode::kLoadTransform> {
+ explicit LoadTransformMatcher(Node* node) : ValueMatcher(node) {}
+ bool Is(LoadTransformation t) {
+ return HasResolvedValue() && ResolvedValue().transformation == t;
+ }
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
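The LoadTransformMatcher added above follows the existing ValueMatcher idiom: construct it on an arbitrary node, record whether the node's parameters could be resolved, and expose a narrow Is() predicate that is safe to call without first checking the opcode. That is what lets VisitF64x2PromoteLowF32x4 probe its input with m.Is(LoadTransformation::kS128Load64Zero) and only then consult m.ResolvedValue(). A simplified, self-contained illustration of the idiom; the node and enum below are toy stand-ins, not V8 types:

#include <optional>

enum class LoadTransformation { kS128Load64Zero, kS128Load8Splat };

// Toy node: either a LoadTransform with a transformation, or something else.
struct ToyNode {
  bool is_load_transform = false;
  LoadTransformation transformation = LoadTransformation::kS128Load8Splat;
};

// Simplified analogue of ValueMatcher<LoadTransformParameters, kLoadTransform>.
class ToyLoadTransformMatcher {
 public:
  explicit ToyLoadTransformMatcher(const ToyNode& node) {
    if (node.is_load_transform) value_ = node.transformation;
  }
  bool HasResolvedValue() const { return value_.has_value(); }
  // False unless the node really is a LoadTransform with transformation t.
  bool Is(LoadTransformation t) const {
    return HasResolvedValue() && *value_ == t;
  }

 private:
  std::optional<LoadTransformation> value_;
};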
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index b956f148cc..d3739f55b3 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -681,7 +681,7 @@
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_ATOMIC_OP_LIST(V) \
- V(AbortCSAAssert) \
+ V(AbortCSADcheck) \
V(DebugBreak) \
V(Comment) \
V(Load) \
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 8d3d93aa2a..d4e47f7361 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -1696,8 +1696,11 @@ struct WasmInliningPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead(&graph_reducer, data->graph(),
data->mcgraph()->common(), temp_zone);
+ // For now, hard-code inlining the function at index 0.
+ InlineByIndex heuristics({0});
WasmInliner inliner(&graph_reducer, env, data->source_positions(),
- data->node_origins(), data->mcgraph(), wire_bytes, 0);
+ data->node_origins(), data->mcgraph(), wire_bytes,
+ &heuristics);
AddReducer(data, &graph_reducer, &dead);
AddReducer(data, &graph_reducer, &inliner);
@@ -1850,9 +1853,9 @@ struct LoadEliminationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone,
- BranchElimination::kEARLY);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions(),
+ BranchElimination::kEARLY);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
@@ -1919,8 +1922,8 @@ struct LateOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
@@ -2048,7 +2051,7 @@ struct WasmOptimizationPhase {
data->machine(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
- &graph_reducer, data->jsgraph(), temp_zone);
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -2103,7 +2106,7 @@ struct CsaEarlyOptimizationPhase {
data->machine(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
- &graph_reducer, data->jsgraph(), temp_zone);
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -2121,8 +2124,8 @@ struct CsaOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
@@ -3097,7 +3100,7 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
// static
wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
- int wasm_kind, const char* debug_name, const AssemblerOptions& options,
+ const char* debug_name, const AssemblerOptions& options,
SourcePositionTable* source_positions) {
Graph* graph = mcgraph->graph();
OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
@@ -3160,6 +3163,9 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result.result_tier = wasm::ExecutionTier::kTurbofan;
+ if (kind == CodeKind::WASM_TO_JS_FUNCTION) {
+ result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+ }
DCHECK(result.succeeded());
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 19fd715885..2a166b2073 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -66,8 +66,7 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code.
static wasm::WasmCompilationResult GenerateCodeForWasmNativeStub(
CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
- int wasm_kind, const char* debug_name,
- const AssemblerOptions& assembler_options,
+ const char* debug_name, const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
// Returns a new compilation job for a wasm heap stub.
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 383d63dd69..2a2eb07fe1 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -190,12 +190,12 @@ void RawMachineAssembler::OptimizeControlFlow(Schedule* schedule, Graph* graph,
false_block->ClearPredecessors();
size_t arity = block->PredecessorCount();
- for (size_t i = 0; i < arity; ++i) {
- BasicBlock* predecessor = block->PredecessorAt(i);
+ for (size_t j = 0; j < arity; ++j) {
+ BasicBlock* predecessor = block->PredecessorAt(j);
predecessor->ClearSuccessors();
if (block->deferred()) predecessor->set_deferred(true);
Node* branch_clone = graph->CloneNode(branch);
- int phi_input = static_cast<int>(i);
+ int phi_input = static_cast<int>(j);
NodeProperties::ReplaceValueInput(
branch_clone, NodeProperties::GetValueInput(phi, phi_input), 0);
BasicBlock* new_true_block = schedule->NewBasicBlock();
@@ -571,14 +571,14 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
size_t succ_count = case_count + 1;
Node* switch_node = MakeNode(common()->Switch(succ_count), 1, &index);
BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
- for (size_t index = 0; index < case_count; ++index) {
- int32_t case_value = case_values[index];
+ for (size_t i = 0; i < case_count; ++i) {
+ int32_t case_value = case_values[i];
BasicBlock* case_block = schedule()->NewBasicBlock();
Node* case_node =
graph()->NewNode(common()->IfValue(case_value), switch_node);
schedule()->AddNode(case_block, case_node);
- schedule()->AddGoto(case_block, Use(case_labels[index]));
- succ_blocks[index] = case_block;
+ schedule()->AddGoto(case_block, Use(case_labels[i]));
+ succ_blocks[i] = case_block;
}
BasicBlock* default_block = schedule()->NewBasicBlock();
Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
@@ -673,8 +673,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
current_block_ = nullptr;
}
-void RawMachineAssembler::AbortCSAAssert(Node* message) {
- AddNode(machine()->AbortCSAAssert(), message);
+void RawMachineAssembler::AbortCSADcheck(Node* message) {
+ AddNode(machine()->AbortCSADcheck(), message);
}
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index f0bb6e0425..23051dfbba 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -1033,7 +1033,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
- void AbortCSAAssert(Node* message);
+ void AbortCSADcheck(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const std::string& msg);
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 07a716bfa7..a54caf2abe 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -967,8 +967,9 @@ class SpecialRPONumberer : public ZoneObject {
if (HasLoopNumber(current)) {
++loop_depth;
current_loop = &loops_[GetLoopNumber(current)];
- BasicBlock* end = current_loop->end;
- current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
+ BasicBlock* loop_end = current_loop->end;
+ current->set_loop_end(loop_end == nullptr ? BeyondEndSentinel()
+ : loop_end);
current_header = current_loop->header;
TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@@ -1025,8 +1026,8 @@ class SpecialRPONumberer : public ZoneObject {
// loop header H are members of the loop too. O(|blocks between M and H|).
while (queue_length > 0) {
BasicBlock* block = (*queue)[--queue_length].block;
- for (size_t i = 0; i < block->PredecessorCount(); i++) {
- BasicBlock* pred = block->PredecessorAt(i);
+ for (size_t j = 0; j < block->PredecessorCount(); j++) {
+ BasicBlock* pred = block->PredecessorAt(j);
if (pred != header) {
if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
loops_[loop_num].members->Add(pred->id().ToInt());
@@ -1124,7 +1125,7 @@ class SpecialRPONumberer : public ZoneObject {
// Check the contiguousness of loops.
int count = 0;
for (int j = 0; j < static_cast<int>(order->size()); j++) {
- BasicBlock* block = order->at(j);
+ block = order->at(j);
DCHECK_EQ(block->rpo_number(), j);
if (j < header->rpo_number() || j >= end->rpo_number()) {
DCHECK(!header->LoopContains(block));
@@ -1440,9 +1441,9 @@ class ScheduleLateNodeVisitor {
queue->push(node);
do {
scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
- Node* const node = queue->front();
+ Node* const n = queue->front();
queue->pop();
- VisitNode(node);
+ VisitNode(n);
} while (!queue->empty());
}
}
@@ -1821,8 +1822,8 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// temporary solution and should be merged into the rest of the scheduler as
// soon as the approach settled for all floating loops.
NodeVector propagation_roots(control_flow_builder_->control_);
- for (Node* node : control_flow_builder_->control_) {
- for (Node* use : node->uses()) {
+ for (Node* control : control_flow_builder_->control_) {
+ for (Node* use : control->uses()) {
if (NodeProperties::IsPhi(use) && IsLive(use)) {
propagation_roots.push_back(use);
}
@@ -1830,8 +1831,8 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
}
if (FLAG_trace_turbo_scheduler) {
TRACE("propagation roots: ");
- for (Node* node : propagation_roots) {
- TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
+ for (Node* r : propagation_roots) {
+ TRACE("#%d:%s ", r->id(), r->op()->mnemonic());
}
TRACE("\n");
}
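For context on the scheduler hunk above: loop membership is computed by walking predecessors backwards from blocks known to be in the loop until the header is reached, adding each visited block to the member set. A self-contained sketch of that worklist on a toy CFG with integer block ids follows; the function and variable names are illustrative, not V8's.

#include <cstdio>
#include <set>
#include <vector>

// Toy CFG: preds[b] lists the predecessors of block b. Illustration only.
std::set<int> LoopMembers(const std::vector<std::vector<int>>& preds,
                          int header, const std::vector<int>& backedge_sources) {
  std::set<int> members = {header};
  std::vector<int> queue(backedge_sources);  // blocks known to be in the loop
  while (!queue.empty()) {
    int block = queue.back();
    queue.pop_back();
    if (!members.insert(block).second) continue;  // already a member
    // Every predecessor of a member that is not the header is a member too,
    // mirroring the O(|blocks between M and H|) walk in the hunk above.
    for (int pred : preds[block]) {
      if (pred != header) queue.push_back(pred);
    }
  }
  return members;
}

int main() {
  // Blocks: 0 -> 1 (header) -> 2 -> 3 -> back edge to 1; 4 is outside the loop.
  std::vector<std::vector<int>> preds = {{}, {0, 3}, {1}, {2}, {1}};
  for (int b : LoopMembers(preds, /*header=*/1, /*backedge_sources=*/{3}))
    std::printf("block %d is in the loop\n", b);
}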
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index a1f9b93dce..15c9f195e0 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -275,6 +275,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
DCHECK(!map.is_undetectable());
return kBoundFunction;
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index a28a28c59e..cdd8c0b0f0 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -72,10 +72,6 @@ namespace compiler {
// existing assumptions or tests.
// Consequently, do not normally use Equals for type tests, always use Is!
//
-// The NowIs operator implements state-sensitive subtying, as described above.
-// Any compilation decision based on such temporary properties requires runtime
-// guarding!
-//
//
// PROPERTIES
//
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index a0f2aa569d..a8bbd06b5f 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -919,7 +919,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kComment:
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
case IrOpcode::kDebugBreak:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index b3d6e7bb74..8446640bfc 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -654,8 +654,9 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
limit, effect()));
- Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
- stack_check.Chain(control());
+ Node* if_true;
+ Node* if_false;
+ gasm_->Branch(check, &if_true, &if_false, BranchHint::kTrue);
if (stack_check_call_operator_ == nullptr) {
// Build and cache the stack check call operator and the constant
@@ -676,15 +677,18 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
stack_check_call_operator_ = mcgraph()->common()->Call(call_descriptor);
}
- Node* call = graph()->NewNode(stack_check_call_operator_.get(),
- stack_check_code_node_.get(), effect(),
- stack_check.if_false);
-
+ Node* call =
+ graph()->NewNode(stack_check_call_operator_.get(),
+ stack_check_code_node_.get(), effect(), if_false);
SetSourcePosition(call, position);
- Node* ephi = stack_check.EffectPhi(effect(), call);
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
+ Node* merge = graph()->NewNode(mcgraph()->common()->Merge(2), if_true, call);
+ DCHECK_GT(call->op()->EffectOutputCount(), 0);
+ Node* ephi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), effect(),
+ call, merge);
- SetEffectControl(ephi, stack_check.merge);
+ SetEffectControl(ephi, merge);
}
void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
@@ -2905,8 +2909,8 @@ Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call =
BuildCallNode(sig, args, position, instance_node, op, frame_state);
- // TODO(manoskouk): Don't always set control if we ever add properties to wasm
- // calls.
+ // TODO(manoskouk): If we have kNoThrow calls, do not set them as control.
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
SetControl(call);
size_t ret_count = sig->return_count();
@@ -2935,8 +2939,8 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
- // TODO(manoskouk): {call} will not always be a control node if we ever add
- // properties to wasm calls.
+ // TODO(manoskouk): If we have kNoThrow calls, do not merge them to end.
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
gasm_->MergeControlToEnd(call);
return call;
@@ -3155,7 +3159,7 @@ Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
}
// TODO(9495): Support CAPI function refs.
-Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
+Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
base::Vector<Node*> rets,
CheckForNull null_check,
@@ -3166,8 +3170,6 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
position);
}
- const wasm::FunctionSig* sig = env_->module->signature(sig_index);
-
Node* function_data = gasm_->LoadFunctionDataFromJSFunction(args[0]);
auto load_target = gasm_->MakeLabel();
@@ -3227,20 +3229,37 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
return call;
}
-Node* WasmGraphBuilder::CallRef(uint32_t sig_index, base::Vector<Node*> args,
+void WasmGraphBuilder::CompareToExternalFunctionAtIndex(
+ Node* func_ref, uint32_t function_index, Node** success_control,
+ Node** failure_control) {
+ // Since we are comparing to a function reference, it is guaranteed that
+ // instance->wasm_external_functions() has been initialized.
+ Node* external_functions = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), GetInstance(),
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kWasmExternalFunctionsOffset));
+ Node* function_ref = gasm_->LoadFixedArrayElement(
+ external_functions, gasm_->IntPtrConstant(function_index),
+ MachineType::AnyTagged());
+ gasm_->Branch(gasm_->WordEqual(function_ref, func_ref), success_control,
+ failure_control, BranchHint::kTrue);
+}
+
+Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* sig,
+ base::Vector<Node*> args,
base::Vector<Node*> rets,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig_index, args, rets, null_check,
- IsReturnCall::kCallContinues, position);
+ return BuildCallRef(sig, args, rets, null_check, IsReturnCall::kCallContinues,
+ position);
}
-Node* WasmGraphBuilder::ReturnCallRef(uint32_t sig_index,
+Node* WasmGraphBuilder::ReturnCallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig_index, args, {}, null_check,
- IsReturnCall::kReturnCall, position);
+ return BuildCallRef(sig, args, {}, null_check, IsReturnCall::kReturnCall,
+ position);
}
Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
@@ -5563,6 +5582,7 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
length, gasm_->Uint32Constant(WasmArray::MaxLength(type))),
position);
wasm::ValueType element_type = type->element_type();
+ // TODO(7748): Consider using gasm_->Allocate().
Builtin stub = ChooseArrayAllocationBuiltin(element_type, initial_value);
// Do NOT mark this as Operator::kEliminatable, because that would cause the
// Call node to have no control inputs, which means it could get scheduled
@@ -5597,6 +5617,25 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
return a;
}
+Node* WasmGraphBuilder::ArrayInit(uint32_t array_index,
+ const wasm::ArrayType* type, Node* rtt,
+ base::Vector<Node*> elements) {
+ wasm::ValueType element_type = type->element_type();
+ // TODO(7748): Consider using gasm_->Allocate().
+ Node* array =
+ gasm_->CallBuiltin(Builtin::kWasmAllocateArray_Uninitialized,
+ Operator::kNoDeopt | Operator::kNoThrow, rtt,
+ Int32Constant(static_cast<int32_t>(elements.size())),
+ Int32Constant(element_type.element_size_bytes()));
+ for (int i = 0; i < static_cast<int>(elements.size()); i++) {
+ Node* offset =
+ gasm_->WasmArrayElementOffset(Int32Constant(i), element_type);
+ gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
+ elements[i]);
+ }
+ return array;
+}
+
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
Node* maps_list =
LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
@@ -5974,6 +6013,11 @@ void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
BoundsCheckArrayCopy(dst_array, dst_index, length, position);
BoundsCheckArrayCopy(src_array, src_index, length, position);
+ auto skip = gasm_->MakeLabel();
+
+ gasm_->GotoIf(gasm_->WordEqual(length, Int32Constant(0)), &skip,
+ BranchHint::kFalse);
+
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
MachineType arg_types[]{
@@ -5983,6 +6027,8 @@ void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
MachineSignature sig(0, 6, arg_types);
BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
src_index, length);
+ gasm_->Goto(&skip);
+ gasm_->Bind(&skip);
}
// 1 bit V8 Smi tag, 31 bits V8 Smi shift, 1 bit i31ref high-bit truncation.
@@ -7501,7 +7547,7 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
wasm::CompilationEnv env(
nullptr, wasm::kNoBoundsChecks,
wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
- wasm::WasmFeatures::All());
+ wasm::WasmFeatures::All(), wasm::DynamicTiering::kDisabled);
WasmGraphBuilder builder(&env, mcgraph->zone(), mcgraph, sig,
source_positions);
@@ -7532,11 +7578,12 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
- wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- call_descriptor, mcgraph, CodeKind::WASM_FUNCTION,
- wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
- source_positions);
- return result;
+ // The code does not call to JS, but conceptually it is an import wrapper,
+ // hence use {WASM_TO_JS_FUNCTION} here.
+ // TODO(wasm): Rename this to {WASM_IMPORT_CALL}?
+ return Pipeline::GenerateCodeForWasmNativeStub(
+ call_descriptor, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, debug_name,
+ WasmStubAssemblerOptions(), source_positions);
}
} // namespace
@@ -7590,12 +7637,9 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION,
- wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
- source_position_table);
- result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
- return result;
+ return Pipeline::GenerateCodeForWasmNativeStub(
+ incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, func_name,
+ WasmStubAssemblerOptions(), source_position_table);
}
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
@@ -7634,8 +7678,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
const char* debug_name = "WasmCapiCall";
wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION,
- wasm::WasmCode::kWasmToCapiWrapper, debug_name,
+ call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION, debug_name,
WasmStubAssemblerOptions(), source_positions);
wasm::WasmCode* published_code;
{
@@ -7816,10 +7859,9 @@ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
source_positions);
auto* allocator = wasm::GetWasmEngine()->allocator();
- wasm::VoidResult graph_construction_result =
- wasm::BuildTFGraph(allocator, env->enabled_features, env->module,
- &builder, detected, func_body, loop_infos,
- node_origins, func_index, wasm::kInstrumentEndpoints);
+ wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
+ allocator, env->enabled_features, env->module, &builder, detected,
+ func_body, loop_infos, node_origins, func_index, wasm::kRegularFunction);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7903,7 +7945,8 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
}
if (ContainsSimd(func_body.sig) && !CpuFeatures::SupportsWasmSimd128()) {
- call_descriptor = GetI32WasmCallDescriptorForSimd(&zone, call_descriptor);
+ // Fail compilation if hardware does not support SIMD.
+ return wasm::WasmCompilationResult{};
}
Pipeline::GenerateCodeForWasmFunction(&info, env, wire_bytes_storage, mcgraph,
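The StackCheck() change above drops the Diamond helper in favour of explicitly built Branch/Merge/EffectPhi nodes. The sketch below only illustrates that diamond shape with a deliberately minimal Node type; it is not V8's TFGraph or GraphAssembler, and all node names are approximations.

#include <cstdio>
#include <initializer_list>
#include <string>
#include <vector>

// Minimal stand-in for a sea-of-nodes graph node; illustration only.
struct Node {
  std::string op;
  std::vector<Node*> inputs;
};

Node* NewNode(std::string op, std::initializer_list<Node*> inputs) {
  return new Node{std::move(op), inputs};
}

int main() {
  Node* effect  = NewNode("StartEffect", {});
  Node* control = NewNode("StartControl", {});
  Node* check   = NewNode("StackPointerGreaterThan", {effect});

  // Fast path: the stack has room. Slow path: call the runtime stack guard.
  Node* branch   = NewNode("Branch(hint=true)", {check, control});
  Node* if_true  = NewNode("IfTrue", {branch});
  Node* if_false = NewNode("IfFalse", {branch});
  Node* call     = NewNode("Call(StackGuard)", {effect, if_false});

  // Re-join the two paths; the call has both an effect and a control output,
  // so it feeds the Merge and the EffectPhi directly, as in the hunk above.
  Node* merge = NewNode("Merge", {if_true, call});
  Node* ephi  = NewNode("EffectPhi", {effect, call, merge});

  std::printf("merge has %zu inputs, effect phi has %zu inputs\n",
              merge->inputs.size(), ephi->inputs.size());
  // Leaked nodes are acceptable for a throwaway sketch.
}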
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 328152b363..ad33c7e1c6 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -325,16 +325,19 @@ class WasmGraphBuilder {
Node* CallIndirect(uint32_t table_index, uint32_t sig_index,
base::Vector<Node*> args, base::Vector<Node*> rets,
wasm::WasmCodePosition position);
- Node* CallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* CallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, CheckForNull null_check,
wasm::WasmCodePosition position);
+ void CompareToExternalFunctionAtIndex(Node* func_ref, uint32_t function_index,
+ Node** success_control,
+ Node** failure_control);
Node* ReturnCall(uint32_t index, base::Vector<Node*> args,
wasm::WasmCodePosition position);
Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index,
base::Vector<Node*> args,
wasm::WasmCodePosition position);
- Node* ReturnCallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* ReturnCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
CheckForNull null_check, wasm::WasmCodePosition position);
void BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
@@ -474,6 +477,8 @@ class WasmGraphBuilder {
void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
Node* src_array, Node* src_index, CheckForNull src_null_check,
Node* length, wasm::WasmCodePosition position);
+ Node* ArrayInit(uint32_t array_index, const wasm::ArrayType* type, Node* rtt,
+ base::Vector<Node*> elements);
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
@@ -586,7 +591,7 @@ class WasmGraphBuilder {
base::Vector<Node*> rets,
wasm::WasmCodePosition position, Node* func_index,
IsReturnCall continuation);
- Node* BuildCallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* BuildCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, CheckForNull null_check,
IsReturnCall continuation,
wasm::WasmCodePosition position);
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
index 6753769953..965b467d67 100644
--- a/deps/v8/src/compiler/wasm-inlining.cc
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -4,6 +4,8 @@
#include "src/compiler/wasm-inlining.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/wasm-compiler.h"
#include "src/wasm/function-body-decoder.h"
@@ -16,34 +18,47 @@ namespace internal {
namespace compiler {
Reduction WasmInliner::Reduce(Node* node) {
- if (node->opcode() == IrOpcode::kCall) {
- return ReduceCall(node);
- } else {
- return NoChange();
+ switch (node->opcode()) {
+ case IrOpcode::kCall:
+ case IrOpcode::kTailCall:
+ return ReduceCall(node);
+ default:
+ return NoChange();
}
}
-// TODO(12166): Abstract over a heuristics provider.
+// TODO(12166): Save inlined frames for trap/--trace-wasm purposes. Consider
+// tail calls.
+// TODO(12166): Inline indirect calls/call_ref.
Reduction WasmInliner::ReduceCall(Node* call) {
+ DCHECK(call->opcode() == IrOpcode::kCall ||
+ call->opcode() == IrOpcode::kTailCall);
Node* callee = NodeProperties::GetValueInput(call, 0);
IrOpcode::Value reloc_opcode = mcgraph_->machine()->Is32()
? IrOpcode::kRelocatableInt32Constant
: IrOpcode::kRelocatableInt64Constant;
if (callee->opcode() != reloc_opcode) return NoChange();
auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
- if (static_cast<uint32_t>(info.value()) != inlinee_index_) return NoChange();
+ uint32_t inlinee_index = static_cast<uint32_t>(info.value());
+ if (!heuristics_->DoInline(source_positions_->GetSourcePosition(call),
+ inlinee_index)) {
+ return NoChange();
+ }
+
+ CHECK_LT(inlinee_index, module()->functions.size());
+ const wasm::WasmFunction* inlinee = &module()->functions[inlinee_index];
- CHECK_LT(inlinee_index_, module()->functions.size());
- const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
- base::Vector<const byte> function_bytes =
- wire_bytes_->GetCode(function->code);
- const wasm::FunctionBody inlinee_body(function->sig, function->code.offset(),
+ base::Vector<const byte> function_bytes = wire_bytes_->GetCode(inlinee->code);
+
+ const wasm::FunctionBody inlinee_body(inlinee->sig, inlinee->code.offset(),
function_bytes.begin(),
function_bytes.end());
wasm::WasmFeatures detected;
- WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig, spt_);
+ WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
+ source_positions_);
std::vector<WasmLoopInfo> infos;
+ size_t subgraph_min_node_id = graph()->NodeCount();
wasm::DecodeResult result;
Node* inlinee_start;
Node* inlinee_end;
@@ -51,25 +66,24 @@ Reduction WasmInliner::ReduceCall(Node* call) {
Graph::SubgraphScope scope(graph());
result = wasm::BuildTFGraph(zone()->allocator(), env_->enabled_features,
module(), &builder, &detected, inlinee_body,
- &infos, node_origins_, inlinee_index_,
- wasm::kDoNotInstrumentEndpoints);
+ &infos, node_origins_, inlinee_index,
+ wasm::kInlinedFunction);
inlinee_start = graph()->start();
inlinee_end = graph()->end();
}
if (result.failed()) return NoChange();
- return InlineCall(call, inlinee_start, inlinee_end);
+ return call->opcode() == IrOpcode::kCall
+ ? InlineCall(call, inlinee_start, inlinee_end, inlinee->sig,
+ subgraph_min_node_id)
+ : InlineTailCall(call, inlinee_start, inlinee_end);
}
-// TODO(12166): Handle exceptions and tail calls.
-Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
- Node* callee_end) {
- DCHECK_EQ(call->opcode(), IrOpcode::kCall);
-
- /* 1) Rewire callee formal parameters to the call-site real parameters. Rewire
- * effect and control dependencies of callee's start node with the respective
- * inputs of the call node.
- */
+/* Rewire callee formal parameters to the call-site real parameters. Rewire
+ * effect and control dependencies of callee's start node with the respective
+ * inputs of the call node.
+ */
+void WasmInliner::RewireFunctionEntry(Node* call, Node* callee_start) {
Node* control = NodeProperties::GetControlInput(call);
Node* effect = NodeProperties::GetEffectInput(call);
@@ -93,16 +107,55 @@ Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
break;
}
}
+}
- /* 2) Rewire uses of the call node to the return values of the callee. Since
- * there might be multiple return nodes in the callee, we have to create Merge
- * and Phi nodes for them.
- */
+Reduction WasmInliner::InlineTailCall(Node* call, Node* callee_start,
+ Node* callee_end) {
+ DCHECK(call->opcode() == IrOpcode::kTailCall);
+ // 1) Rewire function entry.
+ RewireFunctionEntry(call, callee_start);
+ // 2) For tail calls, all we have to do is rewire all terminators of the
+ // inlined graph to the end of the caller graph.
+ for (Node* const input : callee_end->inputs()) {
+ DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
+ }
+ callee_end->Kill();
+ return Replace(mcgraph()->Dead());
+}
+
+Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
+ Node* callee_end,
+ const wasm::FunctionSig* inlinee_sig,
+ size_t subgraph_min_node_id) {
+ DCHECK(call->opcode() == IrOpcode::kCall);
+
+ // 0) Before doing anything, if {call} has an exception handler, collect all
+ // unhandled calls in the subgraph.
+ Node* handler = nullptr;
+ std::vector<Node*> unhandled_subcalls;
+ if (NodeProperties::IsExceptionalCall(call, &handler)) {
+ AllNodes subgraph_nodes(zone(), callee_end, graph());
+ for (Node* node : subgraph_nodes.reachable) {
+ if (node->id() >= subgraph_min_node_id &&
+ !node->op()->HasProperty(Operator::kNoThrow) &&
+ !NodeProperties::IsExceptionalCall(node)) {
+ unhandled_subcalls.push_back(node);
+ }
+ }
+ }
+
+ // 1) Rewire function entry.
+ RewireFunctionEntry(call, callee_start);
+
+ // 2) Handle all graph terminators for the callee.
NodeVector return_nodes(zone());
for (Node* const input : callee_end->inputs()) {
DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
switch (input->opcode()) {
case IrOpcode::kReturn:
+ // Returns are collected to be rewired into the caller graph later.
return_nodes.push_back(input);
break;
case IrOpcode::kDeoptimize:
@@ -111,16 +164,79 @@ Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
NodeProperties::MergeControlToEnd(graph(), common(), input);
Revisit(graph()->end());
break;
- case IrOpcode::kTailCall:
- // TODO(12166): A tail call in the inlined function has to be
- // transformed into a regular call in the caller function.
- UNIMPLEMENTED();
+ case IrOpcode::kTailCall: {
+ // A tail call in the callee inlined in a regular call in the caller has
+ // to be transformed into a regular call, and then returned from the
+ // inlinee. It will then be handled like any other return.
+ auto descriptor = CallDescriptorOf(input->op());
+ NodeProperties::ChangeOp(input, common()->Call(descriptor));
+ int return_arity = static_cast<int>(inlinee_sig->return_count());
+ NodeVector return_inputs(zone());
+ // The first input of a return node is always the 0 constant.
+ return_inputs.push_back(graph()->NewNode(common()->Int32Constant(0)));
+ if (return_arity == 1) {
+ return_inputs.push_back(input);
+ } else if (return_arity > 1) {
+ for (int i = 0; i < return_arity; i++) {
+ return_inputs.push_back(
+ graph()->NewNode(common()->Projection(i), input, input));
+ }
+ }
+
+ // Add effect and control inputs.
+ return_inputs.push_back(input->op()->EffectOutputCount() > 0
+ ? input
+ : NodeProperties::GetEffectInput(input));
+ return_inputs.push_back(input->op()->ControlOutputCount() > 0
+ ? input
+ : NodeProperties::GetControlInput(input));
+
+ Node* ret = graph()->NewNode(common()->Return(return_arity),
+ static_cast<int>(return_inputs.size()),
+ return_inputs.data());
+ return_nodes.push_back(ret);
+ break;
+ }
default:
UNREACHABLE();
}
}
+ callee_end->Kill();
+
+ // 3) Rewire unhandled calls to the handler.
+ std::vector<Node*> on_exception_nodes;
+ for (Node* subcall : unhandled_subcalls) {
+ Node* on_success = graph()->NewNode(common()->IfSuccess(), subcall);
+ NodeProperties::ReplaceUses(subcall, subcall, subcall, on_success);
+ NodeProperties::ReplaceControlInput(on_success, subcall);
+ Node* on_exception =
+ graph()->NewNode(common()->IfException(), subcall, subcall);
+ on_exception_nodes.push_back(on_exception);
+ }
+
+ int subcall_count = static_cast<int>(on_exception_nodes.size());
+
+ if (subcall_count > 0) {
+ Node* control_output =
+ graph()->NewNode(common()->Merge(subcall_count), subcall_count,
+ on_exception_nodes.data());
+ on_exception_nodes.push_back(control_output);
+ Node* value_output = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, subcall_count),
+ subcall_count + 1, on_exception_nodes.data());
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(subcall_count), subcall_count + 1,
+ on_exception_nodes.data());
+ ReplaceWithValue(handler, value_output, effect_output, control_output);
+ } else if (handler != nullptr) {
+ // Nothing in the inlined function can throw. Remove the handler.
+ ReplaceWithValue(handler, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
+ }
if (return_nodes.size() > 0) {
+ /* 4) Collect all return site value, effect, and control inputs into phis
+ * and merges. */
int const return_count = static_cast<int>(return_nodes.size());
NodeVector controls(zone());
NodeVector effects(zone());
@@ -150,14 +266,14 @@ Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
ith_values.push_back(control_output);
// Find the correct machine representation for the return values from the
// inlinee signature.
- const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
MachineRepresentation repr =
- function->sig->GetReturn(i).machine_representation();
+ inlinee_sig->GetReturn(i).machine_representation();
Node* ith_value_output = graph()->NewNode(
common()->Phi(repr, return_count),
static_cast<int>(ith_values.size()), &ith_values.front());
values.push_back(ith_value_output);
}
+ for (Node* return_node : return_nodes) return_node->Kill();
if (return_arity == 0) {
// Void function, no value uses.
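One detail of the inliner change worth spelling out: a tail call inside the inlinee is rewritten into a regular call followed by a Return whose inputs are the pop-count constant, the returned value(s) or projections, and then effect and control. The snippet below only models how that input list grows with the return arity; the strings stand in for nodes and are not a real graph.

#include <cstdio>
#include <string>
#include <vector>

// Illustrative only: the value inputs a Return node receives when an inlined
// tail call is rewritten into call + Return, as in the hunk above.
std::vector<std::string> ReturnInputs(int return_arity) {
  std::vector<std::string> inputs = {"Int32Constant(0)"};  // pop count comes first
  if (return_arity == 1) {
    inputs.push_back("call");  // single value: the call node itself
  } else {
    for (int i = 0; i < return_arity; i++) {
      inputs.push_back("Projection(" + std::to_string(i) + ", call)");
    }
  }
  inputs.push_back("effect");
  inputs.push_back("control");
  return inputs;
}

int main() {
  for (int arity : {0, 1, 3}) {
    std::printf("arity %d:", arity);
    for (const std::string& in : ReturnInputs(arity)) std::printf(" %s", in.c_str());
    std::printf("\n");
  }
}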
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
index 8b31b6b291..b63e232198 100644
--- a/deps/v8/src/compiler/wasm-inlining.h
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -18,6 +18,7 @@ namespace internal {
namespace wasm {
struct CompilationEnv;
struct WasmModule;
+struct WasmFunction;
class WireBytesStorage;
} // namespace wasm
@@ -29,24 +30,49 @@ namespace compiler {
class NodeOriginTable;
class SourcePositionTable;
+// Parent class for classes that provide heuristics on how to inline in wasm.
+class WasmInliningHeuristics {
+ public:
+ virtual bool DoInline(SourcePosition position,
+ uint32_t function_index) const = 0;
+};
+
+// A simple inlining heuristic that inlines all function calls to a set of given
+// function indices.
+class InlineByIndex : public WasmInliningHeuristics {
+ public:
+ explicit InlineByIndex(uint32_t function_index)
+ : WasmInliningHeuristics(), function_indices_(function_index) {}
+ InlineByIndex(std::initializer_list<uint32_t> function_indices)
+ : WasmInliningHeuristics(), function_indices_(function_indices) {}
+
+ bool DoInline(SourcePosition position,
+ uint32_t function_index) const override {
+ return function_indices_.count(function_index) > 0;
+ }
+
+ private:
+ std::unordered_set<uint32_t> function_indices_;
+};
+
// The WasmInliner provides the core graph inlining machinery for Webassembly
// graphs. Note that this class only deals with the mechanics of how to inline
-// one graph into another, heuristics that decide what and how much to inline
-// are beyond its scope. As a current placeholder, only a function at specific
-// given index {inlinee_index} is inlined.
+// one graph into another; heuristics that decide what and how much to inline
+// are provided by {WasmInliningHeuristics}.
class WasmInliner final : public AdvancedReducer {
public:
WasmInliner(Editor* editor, wasm::CompilationEnv* env,
- SourcePositionTable* spt, NodeOriginTable* node_origins,
- MachineGraph* mcgraph, const wasm::WireBytesStorage* wire_bytes,
- uint32_t inlinee_index)
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins, MachineGraph* mcgraph,
+ const wasm::WireBytesStorage* wire_bytes,
+ const WasmInliningHeuristics* heuristics)
: AdvancedReducer(editor),
env_(env),
- spt_(spt),
+ source_positions_(source_positions),
node_origins_(node_origins),
mcgraph_(mcgraph),
wire_bytes_(wire_bytes),
- inlinee_index_(inlinee_index) {}
+ heuristics_(heuristics) {}
const char* reducer_name() const override { return "WasmInliner"; }
@@ -58,16 +84,21 @@ class WasmInliner final : public AdvancedReducer {
Graph* graph() const { return mcgraph_->graph(); }
MachineGraph* mcgraph() const { return mcgraph_; }
const wasm::WasmModule* module() const;
+ const wasm::WasmFunction* inlinee() const;
Reduction ReduceCall(Node* call);
- Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end);
+ Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end,
+ const wasm::FunctionSig* inlinee_sig,
+ size_t subgraph_min_node_id);
+ Reduction InlineTailCall(Node* call, Node* callee_start, Node* callee_end);
+ void RewireFunctionEntry(Node* call, Node* callee_start);
wasm::CompilationEnv* const env_;
- SourcePositionTable* const spt_;
+ SourcePositionTable* const source_positions_;
NodeOriginTable* const node_origins_;
MachineGraph* const mcgraph_;
const wasm::WireBytesStorage* const wire_bytes_;
- const uint32_t inlinee_index_;
+ const WasmInliningHeuristics* const heuristics_;
};
} // namespace compiler
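To make the new heuristics hook concrete, here is a standalone sketch modelled on the header above: an abstract DoInline() predicate with a set-of-indices implementation. The real interface also receives a SourcePosition argument, which is dropped here; the code is illustrative, not V8's.

#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <unordered_set>

// Sketch of the heuristics interface added above; names mirror the header,
// but this is a simplified stand-alone model.
class WasmInliningHeuristics {
 public:
  virtual ~WasmInliningHeuristics() = default;
  virtual bool DoInline(uint32_t function_index) const = 0;
};

class InlineByIndex : public WasmInliningHeuristics {
 public:
  InlineByIndex(std::initializer_list<uint32_t> function_indices)
      : function_indices_(function_indices) {}
  bool DoInline(uint32_t function_index) const override {
    return function_indices_.count(function_index) > 0;
  }

 private:
  std::unordered_set<uint32_t> function_indices_;
};

int main() {
  InlineByIndex heuristics{11, 42};
  for (uint32_t index : {11u, 12u, 42u}) {
    std::printf("inline function %u? %s\n", index,
                heuristics.DoInline(index) ? "yes" : "no");
  }
}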
diff --git a/deps/v8/src/d8/d8-posix.cc b/deps/v8/src/d8/d8-posix.cc
index 8a031ccdc0..8db4beff0f 100644
--- a/deps/v8/src/d8/d8-posix.cc
+++ b/deps/v8/src/d8/d8-posix.cc
@@ -165,10 +165,12 @@ class ExecArgs {
"os.system(): String conversion of program name failed");
return false;
}
- int len = prog.length() + 3;
- char* c_arg = new char[len];
- snprintf(c_arg, len, "%s", *prog);
- exec_args_[0] = c_arg;
+ {
+ int len = prog.length() + 3;
+ char* c_arg = new char[len];
+ snprintf(c_arg, len, "%s", *prog);
+ exec_args_[0] = c_arg;
+ }
int i = 1;
for (unsigned j = 0; j < command_args->Length(); i++, j++) {
Local<Value> arg(
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
index 6202c397ec..c474d3adb8 100644
--- a/deps/v8/src/d8/d8-test.cc
+++ b/deps/v8/src/d8/d8-test.cc
@@ -17,7 +17,8 @@
// and resetting these counters.
// Make sure to sync the following with src/compiler/globals.h.
-#if defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64) || \
+ (defined(V8_TARGET_ARCH_ARM64) && !defined(USE_SIMULATOR))
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
@@ -95,10 +96,8 @@ class FastCApiObject {
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
typedef double Type;
-#define type_info kTypeInfoFloat64
#else
typedef int32_t Type;
-#define type_info kTypeInfoInt32
#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
static Type AddAllSequenceFastCallback(Local<Object> receiver,
bool should_fallback,
@@ -120,8 +119,9 @@ class FastCApiObject {
}
Type buffer[1024];
- bool result = TryCopyAndConvertArrayToCppBuffer<&type_info, Type>(
- seq_arg, buffer, 1024);
+ bool result = TryToCopyAndConvertArrayToCppBuffer<
+ i::CTypeInfoBuilder<Type>::Build().GetId(), Type>(seq_arg, buffer,
+ 1024);
if (!result) {
options.fallback = 1;
return 0;
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 6d35be77b8..7455353821 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -52,7 +52,7 @@
#include "src/interpreter/interpreter.h"
#include "src/logging/counters.h"
#include "src/logging/log-utils.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/parsing/parse-info.h"
@@ -170,11 +170,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* AllocateVM(size_t length) {
DCHECK_LE(kVMThreshold, length);
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
-#else
- v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
-#endif
+ v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
@@ -182,11 +178,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
void FreeVM(void* data, size_t length) {
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
-#else
- v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
-#endif
+ v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(page_allocator, data, allocated));
@@ -714,7 +706,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
ScriptOrigin origin(isolate, name);
for (int i = 1; i < options.repeat_compile; ++i) {
- HandleScope handle_scope(isolate);
+ HandleScope handle_scope_for_compiling(isolate);
if (CompileString<Script>(isolate, context, source, origin).IsEmpty()) {
return false;
}
@@ -3260,10 +3252,10 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
int end_line = end.GetLineNumber();
uint32_t count = function_data.Count();
- Local<String> name;
+ Local<String> function_name;
std::stringstream name_stream;
- if (function_data.Name().ToLocal(&name)) {
- name_stream << ToSTLString(isolate, name);
+ if (function_data.Name().ToLocal(&function_name)) {
+ name_stream << ToSTLString(isolate, function_name);
} else {
name_stream << "<" << start_line + 1 << "-";
name_stream << start.GetColumnNumber() << ">";
@@ -3283,8 +3275,8 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
}
}
// Write per-line coverage. LCOV uses 1-based line numbers.
- for (size_t i = 0; i < lines.size(); i++) {
- sink << "DA:" << (i + 1) << "," << lines[i] << std::endl;
+ for (size_t j = 0; j < lines.size(); j++) {
+ sink << "DA:" << (j + 1) << "," << lines[j] << std::endl;
}
sink << "end_of_record" << std::endl;
}
@@ -5202,15 +5194,15 @@ int Shell::Main(int argc, char* argv[]) {
ShellOptions::CodeCacheOptions::kNoProduceCache) {
printf("============ Run: Produce code cache ============\n");
// First run to produce the cache
- Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = Shell::array_buffer_allocator;
+ Isolate::CreateParams create_params2;
+ create_params2.array_buffer_allocator = Shell::array_buffer_allocator;
i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
- Isolate* isolate2 = Isolate::New(create_params);
+ Isolate* isolate2 = Isolate::New(create_params2);
i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
{
- D8Console console(isolate2);
- Initialize(isolate2, &console);
- PerIsolateData data(isolate2);
+ D8Console console2(isolate2);
+ Initialize(isolate2, &console2);
+ PerIsolateData data2(isolate2);
Isolate::Scope isolate_scope(isolate2);
result = RunMain(isolate2, false);
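The AllocateVM/FreeVM hunks above both round the requested length up to the allocator's page size before reserving or releasing pages. A minimal sketch of that rounding step follows; kPageSize is a made-up value here, whereas the real code queries the page allocator.

#include <cstddef>
#include <cstdio>

// Illustration of the RoundUp(length, page_size) step used above.
constexpr size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

int main() {
  constexpr size_t kPageSize = 4096;  // assumed page size for the example
  for (size_t length : {1u, 4096u, 4097u, 100000u}) {
    std::printf("request %zu bytes -> reserve %zu bytes\n", length,
                RoundUp(length, kPageSize));
  }
}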
diff --git a/deps/v8/src/date/dateparser.h b/deps/v8/src/date/dateparser.h
index 1a0a0b15ab..9975737c07 100644
--- a/deps/v8/src/date/dateparser.h
+++ b/deps/v8/src/date/dateparser.h
@@ -75,9 +75,6 @@ class DateParser : public AllStatic {
int ReadUnsignedNumeral() {
int n = 0;
int i = 0;
- // First, skip leading zeros
- while (ch_ == '0') Next();
- // And then, do the conversion
while (IsAsciiDigit()) {
if (i < kMaxSignificantDigits) n = n * 10 + ch_ - '0';
i++;
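With the leading-zero skip removed above, every digit (including leading zeros) now advances the significant-digit counter while only the first kMaxSignificantDigits affect the value. A self-contained approximation of that loop is shown below; the cap value of 9 is an assumption for the example, not necessarily the parser's real constant.

#include <cctype>
#include <cstdio>

// Standalone sketch of the numeral loop above: digits beyond the cap still
// advance the position but no longer change the accumulated value.
int ReadUnsignedNumeral(const char*& p) {
  const int kMaxSignificantDigits = 9;  // assumed cap for illustration
  int n = 0;
  int i = 0;
  while (std::isdigit(static_cast<unsigned char>(*p))) {
    if (i < kMaxSignificantDigits) n = n * 10 + (*p - '0');
    i++;
    ++p;
  }
  return n;
}

int main() {
  const char* input = "000123abc";
  const char* p = input;
  std::printf("parsed %d, stopped at '%s'\n", ReadUnsignedNumeral(p), p);
}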
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 5940e2dd02..915ed7833f 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -288,9 +288,8 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
}
}
-namespace {
-
-bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
+// static
+bool DebugEvaluate::IsSideEffectFreeIntrinsic(Runtime::FunctionId id) {
// Use macro to include only the non-inlined version of an intrinsic.
#define INTRINSIC_ALLOWLIST(V) \
/* Conversions */ \
@@ -385,7 +384,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(StringMaxLength) \
V(StringToArray) \
V(AsyncFunctionEnter) \
- V(AsyncFunctionReject) \
V(AsyncFunctionResolve) \
/* Test */ \
V(GetOptimizationStatus) \
@@ -395,7 +393,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
// Intrinsics with inline versions have to be allowlisted here a second time.
#define INLINE_INTRINSIC_ALLOWLIST(V) \
V(AsyncFunctionEnter) \
- V(AsyncFunctionReject) \
V(AsyncFunctionResolve)
#define CASE(Name) case Runtime::k##Name:
@@ -418,6 +415,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
#undef INLINE_INTRINSIC_ALLOWLIST
}
+namespace {
+
bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
using interpreter::Bytecode;
using interpreter::Bytecodes;
@@ -753,6 +752,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kStringFromCharCode:
case Builtin::kStringFromCodePoint:
case Builtin::kStringConstructor:
+ case Builtin::kStringListFromIterable:
case Builtin::kStringPrototypeAnchor:
case Builtin::kStringPrototypeAt:
case Builtin::kStringPrototypeBig:
@@ -831,6 +831,78 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kAllocateRegularInOldGeneration:
return DebugInfo::kHasNoSideEffect;
+#ifdef V8_INTL_SUPPORT
+ // Intl builtins.
+ case Builtin::kIntlGetCanonicalLocales:
+ // Intl.Collator builtins.
+ case Builtin::kCollatorConstructor:
+ case Builtin::kCollatorInternalCompare:
+ case Builtin::kCollatorPrototypeCompare:
+ case Builtin::kCollatorPrototypeResolvedOptions:
+ case Builtin::kCollatorSupportedLocalesOf:
+ // Intl.DateTimeFormat builtins.
+ case Builtin::kDateTimeFormatConstructor:
+ case Builtin::kDateTimeFormatInternalFormat:
+ case Builtin::kDateTimeFormatPrototypeFormat:
+ case Builtin::kDateTimeFormatPrototypeFormatRange:
+ case Builtin::kDateTimeFormatPrototypeFormatRangeToParts:
+ case Builtin::kDateTimeFormatPrototypeFormatToParts:
+ case Builtin::kDateTimeFormatPrototypeResolvedOptions:
+ case Builtin::kDateTimeFormatSupportedLocalesOf:
+ // Intl.DisplayNames builtins.
+ case Builtin::kDisplayNamesConstructor:
+ case Builtin::kDisplayNamesPrototypeOf:
+ case Builtin::kDisplayNamesPrototypeResolvedOptions:
+ case Builtin::kDisplayNamesSupportedLocalesOf:
+ // Intl.ListFormat builtins.
+ case Builtin::kListFormatConstructor:
+ case Builtin::kListFormatPrototypeFormat:
+ case Builtin::kListFormatPrototypeFormatToParts:
+ case Builtin::kListFormatPrototypeResolvedOptions:
+ case Builtin::kListFormatSupportedLocalesOf:
+ // Intl.Locale builtins.
+ case Builtin::kLocaleConstructor:
+ case Builtin::kLocalePrototypeBaseName:
+ case Builtin::kLocalePrototypeCalendar:
+ case Builtin::kLocalePrototypeCalendars:
+ case Builtin::kLocalePrototypeCaseFirst:
+ case Builtin::kLocalePrototypeCollation:
+ case Builtin::kLocalePrototypeCollations:
+ case Builtin::kLocalePrototypeHourCycle:
+ case Builtin::kLocalePrototypeHourCycles:
+ case Builtin::kLocalePrototypeLanguage:
+ case Builtin::kLocalePrototypeMaximize:
+ case Builtin::kLocalePrototypeMinimize:
+ case Builtin::kLocalePrototypeNumeric:
+ case Builtin::kLocalePrototypeNumberingSystem:
+ case Builtin::kLocalePrototypeNumberingSystems:
+ case Builtin::kLocalePrototypeRegion:
+ case Builtin::kLocalePrototypeScript:
+ case Builtin::kLocalePrototypeTextInfo:
+ case Builtin::kLocalePrototypeTimeZones:
+ case Builtin::kLocalePrototypeToString:
+ case Builtin::kLocalePrototypeWeekInfo:
+ // Intl.NumberFormat builtins.
+ case Builtin::kNumberFormatConstructor:
+ case Builtin::kNumberFormatInternalFormatNumber:
+ case Builtin::kNumberFormatPrototypeFormatNumber:
+ case Builtin::kNumberFormatPrototypeFormatToParts:
+ case Builtin::kNumberFormatPrototypeResolvedOptions:
+ case Builtin::kNumberFormatSupportedLocalesOf:
+ // Intl.PluralRules builtins.
+ case Builtin::kPluralRulesConstructor:
+ case Builtin::kPluralRulesPrototypeResolvedOptions:
+ case Builtin::kPluralRulesPrototypeSelect:
+ case Builtin::kPluralRulesSupportedLocalesOf:
+ // Intl.RelativeTimeFormat builtins.
+ case Builtin::kRelativeTimeFormatConstructor:
+ case Builtin::kRelativeTimeFormatPrototypeFormat:
+ case Builtin::kRelativeTimeFormatPrototypeFormatToParts:
+ case Builtin::kRelativeTimeFormatPrototypeResolvedOptions:
+ case Builtin::kRelativeTimeFormatSupportedLocalesOf:
+ return DebugInfo::kHasNoSideEffect;
+#endif // V8_INTL_SUPPORT
+
// Set builtins.
case Builtin::kSetIteratorPrototypeNext:
case Builtin::kSetPrototypeAdd:
@@ -882,6 +954,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtin id) {
case Builtin::kRegExpPrototypeUnicodeGetter:
case Builtin::kRegExpPrototypeStickyGetter:
return DebugInfo::kRequiresRuntimeChecks;
+
default:
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] built-in %s may cause side effect.\n",
@@ -902,7 +975,7 @@ bool BytecodeRequiresRuntimeCheck(interpreter::Bytecode bytecode) {
case Bytecode::kStaCurrentContextSlot:
return true;
default:
- return false;
+ return interpreter::Bytecodes::IsCallRuntime(bytecode);
}
}
@@ -929,16 +1002,6 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
it.Advance()) {
interpreter::Bytecode bytecode = it.current_bytecode();
-
- if (interpreter::Bytecodes::IsCallRuntime(bytecode)) {
- Runtime::FunctionId id =
- (bytecode == interpreter::Bytecode::kInvokeIntrinsic)
- ? it.GetIntrinsicIdOperand(0)
- : it.GetRuntimeIdOperand(0);
- if (IntrinsicHasNoSideEffect(id)) continue;
- return DebugInfo::kHasSideEffects;
- }
-
if (BytecodeHasNoSideEffect(bytecode)) continue;
if (BytecodeRequiresRuntimeCheck(bytecode)) {
requires_runtime_checks = true;
@@ -979,7 +1042,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
switch (callee) {
// Transitively called Builtins:
case Builtin::kAbort:
- case Builtin::kAbortCSAAssert:
+ case Builtin::kAbortCSADcheck:
case Builtin::kAdaptorWithBuiltinExitFrame:
case Builtin::kArrayConstructorImpl:
case Builtin::kArrayEveryLoopContinuation:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 34a6c8d4c7..1a9be54893 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -53,6 +53,7 @@ class DebugEvaluate : public AllStatic {
static DebugInfo::SideEffectState FunctionGetSideEffectState(
Isolate* isolate, Handle<SharedFunctionInfo> info);
static void ApplySideEffectChecks(Handle<BytecodeArray> bytecode_array);
+ static bool IsSideEffectFreeIntrinsic(Runtime::FunctionId id);
#ifdef DEBUG
static void VerifyTransitiveBuiltins(Isolate* isolate);
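The IsSideEffectFreeIntrinsic change above keeps the allowlist as an X-macro that expands into switch cases. The following toy version shows only that macro/switch shape; FooId and the allowlist entries are invented for the example and have nothing to do with V8's real intrinsic ids.

#include <cstdio>

// Made-up intrinsic ids; only the allowlist/switch pattern mirrors the diff.
enum class FooId { kToString, kToNumber, kSetProperty, kDeleteProperty };

#define INTRINSIC_ALLOWLIST(V) \
  V(ToString)                  \
  V(ToNumber)

bool IntrinsicHasNoSideEffect(FooId id) {
#define CASE(Name) case FooId::k##Name:
  switch (id) {
    INTRINSIC_ALLOWLIST(CASE)
      return true;
    default:
      return false;
  }
#undef CASE
}

int main() {
  std::printf("ToNumber side-effect free: %d\n",
              IntrinsicHasNoSideEffect(FooId::kToNumber));
  std::printf("SetProperty side-effect free: %d\n",
              IntrinsicHasNoSideEffect(FooId::kSetProperty));
}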
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index ed995a67b9..e6ae32f9d2 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -17,7 +17,6 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/regexp/regexp-stack.h"
#include "src/strings/string-builder-inl.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -304,10 +303,7 @@ void SetTerminateOnResume(Isolate* v8_isolate) {
bool CanBreakProgram(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_DO_NOT_USE(isolate);
- // We cannot break a program if we are currently running a regexp.
- // TODO(yangguo): fix this exception.
- return !isolate->regexp_stack()->is_in_use() &&
- isolate->debug()->AllFramesOnStackAreBlackboxed();
+ return isolate->debug()->AllFramesOnStackAreBlackboxed();
}
Isolate* Script::GetIsolate() const {
@@ -1249,7 +1245,7 @@ MaybeLocal<Message> GetMessageFromPromise(Local<Promise> p) {
}
std::unique_ptr<PropertyIterator> PropertyIterator::Create(
- Local<Context> context, Local<Object> object) {
+ Local<Context> context, Local<Object> object, bool skip_indices) {
internal::Isolate* isolate =
reinterpret_cast<i::Isolate*>(object->GetIsolate());
if (IsExecutionTerminatingCheck(isolate)) {
@@ -1257,8 +1253,8 @@ std::unique_ptr<PropertyIterator> PropertyIterator::Create(
}
CallDepthScope<false> call_depth_scope(isolate, context);
- auto result =
- i::DebugPropertyIterator::Create(isolate, Utils::OpenHandle(*object));
+ auto result = i::DebugPropertyIterator::Create(
+ isolate, Utils::OpenHandle(*object), skip_indices);
if (!result) {
DCHECK(isolate->has_pending_exception());
call_depth_scope.Escape();
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 8575f168fe..8c0ddb46cb 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -619,7 +619,8 @@ class V8_EXPORT_PRIVATE PropertyIterator {
// Creating a PropertyIterator can potentially throw an exception.
// The returned std::unique_ptr is empty iff that happens.
V8_WARN_UNUSED_RESULT static std::unique_ptr<PropertyIterator> Create(
- v8::Local<v8::Context> context, v8::Local<v8::Object> object);
+ v8::Local<v8::Context> context, v8::Local<v8::Object> object,
+ bool skip_indices = false);
virtual ~PropertyIterator() = default;
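The skip_indices parameter above is added with a default value so existing callers of Create(context, object) keep compiling unchanged. A tiny sketch of that API-evolution pattern, with a made-up PropertyIteratorish type standing in for the real interface:

#include <cstdio>

// Sketch of extending a factory with a defaulted parameter; illustration only.
struct PropertyIteratorish {
  static PropertyIteratorish Create(int object, bool skip_indices = false) {
    return PropertyIteratorish{object, skip_indices};
  }
  int object;
  bool skip_indices;
};

int main() {
  PropertyIteratorish a = PropertyIteratorish::Create(1);        // old call site
  PropertyIteratorish b = PropertyIteratorish::Create(2, true);  // new call site
  std::printf("%d skip=%d, %d skip=%d\n", a.object, a.skip_indices, b.object,
              b.skip_indices);
}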
diff --git a/deps/v8/src/debug/debug-property-iterator.cc b/deps/v8/src/debug/debug-property-iterator.cc
index 5d7ecda979..b0bca65e30 100644
--- a/deps/v8/src/debug/debug-property-iterator.cc
+++ b/deps/v8/src/debug/debug-property-iterator.cc
@@ -15,15 +15,14 @@ namespace v8 {
namespace internal {
std::unique_ptr<DebugPropertyIterator> DebugPropertyIterator::Create(
- Isolate* isolate, Handle<JSReceiver> receiver) {
+ Isolate* isolate, Handle<JSReceiver> receiver, bool skip_indices) {
// Can't use std::make_unique as Ctor is private.
auto iterator = std::unique_ptr<DebugPropertyIterator>(
- new DebugPropertyIterator(isolate, receiver));
+ new DebugPropertyIterator(isolate, receiver, skip_indices));
if (receiver->IsJSProxy()) {
iterator->AdvanceToPrototype();
}
- if (iterator->Done()) return iterator;
if (!iterator->FillKeysForCurrentPrototypeAndStage()) return nullptr;
if (iterator->should_move_to_next_stage() && !iterator->AdvanceInternal()) {
@@ -34,10 +33,15 @@ std::unique_ptr<DebugPropertyIterator> DebugPropertyIterator::Create(
}
DebugPropertyIterator::DebugPropertyIterator(Isolate* isolate,
- Handle<JSReceiver> receiver)
+ Handle<JSReceiver> receiver,
+ bool skip_indices)
: isolate_(isolate),
prototype_iterator_(isolate, receiver, kStartAtReceiver,
- PrototypeIterator::END_AT_NULL) {}
+ PrototypeIterator::END_AT_NULL),
+ skip_indices_(skip_indices),
+ current_key_index_(0),
+ current_keys_(isolate_->factory()->empty_fixed_array()),
+ current_keys_length_(0) {}
bool DebugPropertyIterator::Done() const { return is_done_; }
@@ -54,13 +58,13 @@ bool DebugPropertyIterator::AdvanceInternal() {
calculated_native_accessor_flags_ = false;
while (should_move_to_next_stage()) {
switch (stage_) {
- case Stage::kExoticIndices:
- stage_ = Stage::kEnumerableStrings;
+ case kExoticIndices:
+ stage_ = kEnumerableStrings;
break;
- case Stage::kEnumerableStrings:
- stage_ = Stage::kAllProperties;
+ case kEnumerableStrings:
+ stage_ = kAllProperties;
break;
- case Stage::kAllProperties:
+ case kAllProperties:
AdvanceToPrototype();
break;
}
@@ -70,20 +74,17 @@ bool DebugPropertyIterator::AdvanceInternal() {
}
bool DebugPropertyIterator::is_native_accessor() {
- if (stage_ == kExoticIndices) return false;
CalculateNativeAccessorFlags();
return native_accessor_flags_;
}
bool DebugPropertyIterator::has_native_getter() {
- if (stage_ == kExoticIndices) return false;
CalculateNativeAccessorFlags();
return native_accessor_flags_ &
static_cast<int>(debug::NativeAccessorType::HasGetter);
}
bool DebugPropertyIterator::has_native_setter() {
- if (stage_ == kExoticIndices) return false;
CalculateNativeAccessorFlags();
return native_accessor_flags_ &
static_cast<int>(debug::NativeAccessorType::HasSetter);
@@ -95,7 +96,7 @@ Handle<Name> DebugPropertyIterator::raw_name() const {
return isolate_->factory()->SizeToString(current_key_index_);
} else {
return Handle<Name>::cast(FixedArray::get(
- *keys_, static_cast<int>(current_key_index_), isolate_));
+ *current_keys_, static_cast<int>(current_key_index_), isolate_));
}
}
@@ -140,42 +141,38 @@ bool DebugPropertyIterator::is_own() { return is_own_; }
bool DebugPropertyIterator::is_array_index() {
if (stage_ == kExoticIndices) return true;
- uint32_t index = 0;
- return raw_name()->AsArrayIndex(&index);
+ PropertyKey key(isolate_, raw_name());
+ return key.is_element();
}
bool DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
current_key_index_ = 0;
- exotic_length_ = 0;
- keys_ = Handle<FixedArray>::null();
+ current_keys_ = isolate_->factory()->empty_fixed_array();
+ current_keys_length_ = 0;
if (is_done_) return true;
Handle<JSReceiver> receiver =
PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
- bool has_exotic_indices = receiver->IsJSTypedArray();
if (stage_ == kExoticIndices) {
- if (!has_exotic_indices) return true;
+ if (skip_indices_ || !receiver->IsJSTypedArray()) return true;
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
- exotic_length_ = typed_array->WasDetached() ? 0 : typed_array->length();
+ current_keys_length_ =
+ typed_array->WasDetached() ? 0 : typed_array->length();
return true;
}
- bool skip_indices = has_exotic_indices;
PropertyFilter filter =
stage_ == kEnumerableStrings ? ENUMERABLE_STRINGS : ALL_PROPERTIES;
- if (!KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
- GetKeysConversion::kConvertToString, false,
- skip_indices)
- .ToHandle(&keys_)) {
- keys_ = Handle<FixedArray>::null();
- return false;
+ if (KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
+ GetKeysConversion::kConvertToString, false,
+ skip_indices_ || receiver->IsJSTypedArray())
+ .ToHandle(&current_keys_)) {
+ current_keys_length_ = current_keys_->length();
+ return true;
}
- return true;
+ return false;
}
bool DebugPropertyIterator::should_move_to_next_stage() const {
- if (is_done_) return false;
- if (stage_ == kExoticIndices) return current_key_index_ >= exotic_length_;
- return keys_.is_null() ||
- current_key_index_ >= static_cast<size_t>(keys_->length());
+ return !is_done_ && current_key_index_ >= current_keys_length_;
}
namespace {
@@ -210,10 +207,14 @@ base::Flags<debug::NativeAccessorType, int> GetNativeAccessorDescriptorInternal(
void DebugPropertyIterator::CalculateNativeAccessorFlags() {
if (calculated_native_accessor_flags_) return;
- Handle<JSReceiver> receiver =
- PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
- native_accessor_flags_ =
- GetNativeAccessorDescriptorInternal(receiver, raw_name());
+ if (stage_ == kExoticIndices) {
+ native_accessor_flags_ = 0;
+ } else {
+ Handle<JSReceiver> receiver =
+ PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
+ native_accessor_flags_ =
+ GetNativeAccessorDescriptorInternal(receiver, raw_name());
+ }
calculated_native_accessor_flags_ = true;
}
} // namespace internal
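The DebugPropertyIterator rework above keeps a three-stage walk per prototype (exotic typed-array indices, then enumerable strings, then all properties) and moves to the next stage whenever the current key list is exhausted. Below is a simplified standalone model of that staging; the key sets are canned strings here, whereas the real iterator pulls them from the typed array length and KeyAccumulator.

#include <cstdio>
#include <string>
#include <vector>

// Simplified model of the staged key walk; names and data are illustrative.
enum Stage { kExoticIndices = 0, kEnumerableStrings = 1, kAllProperties = 2 };

int main() {
  const std::vector<std::vector<std::string>> keys_per_stage = {
      {"0", "1"},            // exotic typed-array indices
      {"length"},            // enumerable string keys
      {"length", "buffer"},  // all property keys
  };
  int stage = kExoticIndices;
  size_t current_key_index = 0;
  while (stage <= kAllProperties) {
    const std::vector<std::string>& keys = keys_per_stage[stage];
    if (current_key_index >= keys.size()) {  // should_move_to_next_stage()
      stage++;
      current_key_index = 0;
      continue;
    }
    std::printf("stage %d key %s\n", stage, keys[current_key_index].c_str());
    current_key_index++;
  }
}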
diff --git a/deps/v8/src/debug/debug-property-iterator.h b/deps/v8/src/debug/debug-property-iterator.h
index 4e6a93f10e..b28fe78ac8 100644
--- a/deps/v8/src/debug/debug-property-iterator.h
+++ b/deps/v8/src/debug/debug-property-iterator.h
@@ -24,7 +24,7 @@ class JSReceiver;
class DebugPropertyIterator final : public debug::PropertyIterator {
public:
V8_WARN_UNUSED_RESULT static std::unique_ptr<DebugPropertyIterator> Create(
- Isolate* isolate, Handle<JSReceiver> receiver);
+ Isolate* isolate, Handle<JSReceiver> receiver, bool skip_indices);
~DebugPropertyIterator() override = default;
DebugPropertyIterator(const DebugPropertyIterator&) = delete;
DebugPropertyIterator& operator=(const DebugPropertyIterator&) = delete;
@@ -43,7 +43,8 @@ class DebugPropertyIterator final : public debug::PropertyIterator {
bool is_array_index() override;
private:
- DebugPropertyIterator(Isolate* isolate, Handle<JSReceiver> receiver);
+ DebugPropertyIterator(Isolate* isolate, Handle<JSReceiver> receiver,
+ bool skip_indices);
V8_WARN_UNUSED_RESULT bool FillKeysForCurrentPrototypeAndStage();
bool should_move_to_next_stage() const;
@@ -54,12 +55,16 @@ class DebugPropertyIterator final : public debug::PropertyIterator {
Isolate* isolate_;
PrototypeIterator prototype_iterator_;
- enum Stage { kExoticIndices = 0, kEnumerableStrings = 1, kAllProperties = 2 };
- Stage stage_ = kExoticIndices;
-
- size_t current_key_index_ = 0;
- Handle<FixedArray> keys_;
- size_t exotic_length_ = 0;
+ enum {
+ kExoticIndices = 0,
+ kEnumerableStrings = 1,
+ kAllProperties = 2
+ } stage_ = kExoticIndices;
+ bool skip_indices_;
+
+ size_t current_key_index_;
+ Handle<FixedArray> current_keys_;
+ size_t current_keys_length_;
bool calculated_native_accessor_flags_ = false;
int native_accessor_flags_ = 0;
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 4cf0124e8c..031910b4dc 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -24,7 +24,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/v8threads.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h" // For NextDebuggingId.
#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecode-array-iterator.h"
@@ -1583,7 +1583,16 @@ class SharedFunctionInfoFinder {
}
if (start_position > target_position_) return;
- if (target_position_ > shared.EndPosition()) return;
+ if (target_position_ >= shared.EndPosition()) {
+ // The SharedFunctionInfo::EndPosition() is generally exclusive, but there
+ // are assumptions in various places in the debugger that for script level
+ // (toplevel function) there's an end position that is technically outside
+ // the script. It might be worth revisiting the overall design here at
+ // some point in the future.
+ if (!shared.is_toplevel() || target_position_ > shared.EndPosition()) {
+ return;
+ }
+ }
if (!current_candidate_.is_null()) {
if (current_start_position_ == start_position &&
@@ -2686,6 +2695,18 @@ bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
handle(bytecode_array, isolate_), offset);
Bytecode bytecode = bytecode_iterator.current_bytecode();
+ if (interpreter::Bytecodes::IsCallRuntime(bytecode)) {
+ auto id = (bytecode == Bytecode::kInvokeIntrinsic)
+ ? bytecode_iterator.GetIntrinsicIdOperand(0)
+ : bytecode_iterator.GetRuntimeIdOperand(0);
+ if (DebugEvaluate::IsSideEffectFreeIntrinsic(id)) {
+ return true;
+ }
+ side_effect_check_failed_ = true;
+ // Throw an uncatchable termination exception.
+ isolate_->TerminateExecution();
+ return false;
+ }
interpreter::Register reg;
switch (bytecode) {
case Bytecode::kStaCurrentContextSlot:
diff --git a/deps/v8/src/diagnostics/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index 7ba20c0d98..01b697b4bb 100644
--- a/deps/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -2026,13 +2026,11 @@ void Decoder::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
int op1 = instr->Bit(4);
if (op0 == 0) {
// Advanced SIMD three registers of same length.
- int Vd, Vm, Vn;
+ int Vm, Vn;
if (instr->Bit(6) == 0) {
- Vd = instr->VFPDRegValue(kDoublePrecision);
Vm = instr->VFPMRegValue(kDoublePrecision);
Vn = instr->VFPNRegValue(kDoublePrecision);
} else {
- Vd = instr->VFPDRegValue(kSimd128Precision);
Vm = instr->VFPMRegValue(kSimd128Precision);
Vn = instr->VFPNRegValue(kSimd128Precision);
}
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 8f721c997d..fbcba1a4b2 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -818,6 +818,20 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", Imm8_U(current));
current++;
break;
+ case 0x0a:
+ AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", Imm8_U(current));
+ current++;
+ break;
+ case 0x0b:
+ AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", Imm8_U(current));
+ current++;
+ break;
case 0x0E:
AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -910,39 +924,8 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovddup %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x51:
- AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x58:
- AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x59:
- AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5C:
- AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5D:
- AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5E:
- AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x5F:
- AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ case 0x2c:
+ AppendToBuffer("vcvttsd2si %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x70:
@@ -956,6 +939,14 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+#define DISASM_SSE2_INSTRUCTION_LIST_SD(instruction, _1, _2, opcode) \
+ case 0x##opcode: \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
+ NameOfXMMRegister(vvvv)); \
+ current += PrintRightXMMOperand(current); \
+ break;
+ SSE2_INSTRUCTION_LIST_SD(DISASM_SSE2_INSTRUCTION_LIST_SD)
+#undef DISASM_SSE2_INSTRUCTION_LIST_SD
default:
UnimplementedInstruction();
}
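The DISASM_SSE2_INSTRUCTION_LIST_SD block above is an X-macro: SSE2_INSTRUCTION_LIST_SD invokes its argument once per instruction tuple whose first and last entries are the mnemonic and the opcode byte, so a single case body replaces the hand-written vsqrtsd/vaddsd/... cases deleted in this hunk. A hedged, self-contained illustration of the pattern with a made-up two-entry list (MY_SD_LIST, MNEM_CASE, and MnemonicFor are invented names; the real list and its extra parameters live in V8's SSE instruction list headers):

#include <cstdio>

// Hypothetical list macro in the style of SSE2_INSTRUCTION_LIST_SD:
// V(mnemonic, _1, _2, opcode); only the mnemonic and opcode are used below.
#define MY_SD_LIST(V) \
  V(addsd, F2, 0F, 58) \
  V(subsd, F2, 0F, 5C)

const char* MnemonicFor(int opcode_byte) {
  switch (opcode_byte) {
// Each list entry stamps out one case, e.g. `case 0x58: return "vaddsd";`.
#define MNEM_CASE(instruction, _1, _2, opcode) \
  case 0x##opcode:                             \
    return "v" #instruction;
    MY_SD_LIST(MNEM_CASE)
#undef MNEM_CASE
    default:
      return "unknown";
  }
}

int main() { std::printf("%s\n", MnemonicFor(0x5C)); }  // prints "vsubsd"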
@@ -977,6 +968,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovshdup %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x2c:
+ AppendToBuffer("vcvttss2si %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x51:
AppendToBuffer("vsqrtss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -992,6 +987,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5a:
+ AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5B:
AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1177,6 +1177,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x2e:
+ AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x50:
AppendToBuffer("vmovmskps %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
@@ -1282,6 +1286,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x2e:
+ AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x50:
AppendToBuffer("vmovmskpd %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
@@ -2662,30 +2670,15 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
case 0x2D:
mnem = "cvtsd2si";
break;
- case 0x51:
- mnem = "sqrtsd";
- break;
- case 0x58:
- mnem = "addsd";
- break;
- case 0x59:
- mnem = "mulsd";
- break;
- case 0x5C:
- mnem = "subsd";
- break;
- case 0x5D:
- mnem = "minsd";
- break;
- case 0x5E:
- mnem = "divsd";
- break;
- case 0x5F:
- mnem = "maxsd";
- break;
case 0x7C:
mnem = "haddps";
break;
+#define MNEM_FOR_SSE2_INSTRUCTION_LIST_SD(instruction, _1, _2, opcode) \
+ case 0x##opcode: \
+ mnem = "" #instruction; \
+ break;
+ SSE2_INSTRUCTION_LIST_SD(MNEM_FOR_SSE2_INSTRUCTION_LIST_SD)
+#undef MNEM_FOR_SSE2_INSTRUCTION_LIST_SD
}
data += 3;
int mod, regop, rm;
diff --git a/deps/v8/src/diagnostics/loong64/disasm-loong64.cc b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
index 1c41a3896a..9d8aee96a3 100644
--- a/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
+++ b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
@@ -62,7 +62,6 @@ class Decoder {
void PrintUi5(Instruction* instr);
void PrintUi6(Instruction* instr);
void PrintUi12(Instruction* instr);
- void PrintXi12(Instruction* instr);
void PrintMsbw(Instruction* instr);
void PrintLsbw(Instruction* instr);
void PrintMsbd(Instruction* instr);
@@ -72,6 +71,8 @@ class Decoder {
void PrintSi14(Instruction* instr);
void PrintSi16(Instruction* instr);
void PrintSi20(Instruction* instr);
+ void PrintXi12(Instruction* instr);
+ void PrintXi20(Instruction* instr);
void PrintCj(Instruction* instr);
void PrintCd(Instruction* instr);
void PrintCa(Instruction* instr);
@@ -206,6 +207,11 @@ void Decoder::PrintXi12(Instruction* instr) {
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi);
}
+void Decoder::PrintXi20(Instruction* instr) {
+ int xi = instr->Si20Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi);
+}
+
void Decoder::PrintMsbd(Instruction* instr) {
int msbd = instr->MsbdValue();
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbd);
@@ -228,23 +234,27 @@ void Decoder::PrintLsbw(Instruction* instr) {
void Decoder::PrintSi12(Instruction* instr) {
int si = ((instr->Si12Value()) << (32 - kSi12Bits)) >> (32 - kSi12Bits);
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si12Value());
}
void Decoder::PrintSi14(Instruction* instr) {
int si = ((instr->Si14Value()) << (32 - kSi14Bits)) >> (32 - kSi14Bits);
si <<= 2;
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si14Value() << 2);
}
void Decoder::PrintSi16(Instruction* instr) {
int si = ((instr->Si16Value()) << (32 - kSi16Bits)) >> (32 - kSi16Bits);
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si16Value());
}
void Decoder::PrintSi20(Instruction* instr) {
int si = ((instr->Si20Value()) << (32 - kSi20Bits)) >> (32 - kSi20Bits);
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d(0x%x)",
+ si, instr->Si20Value());
}
void Decoder::PrintCj(Instruction* instr) {
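The PrintSi* changes above keep the shift-based sign extension for the decimal value and additionally print the raw field in hex. A small hedged example of the 12-bit case (SignExtend12 is an invented stand-in for the inline shift expression): a raw 12-bit field of 0x800 sign-extends to -2048, so the new format prints "-2048(0x800)".

#include <cstdio>

// Sketch of the 12-bit sign extension used by PrintSi12 above; it mirrors
// the decoder's shift-based approach rather than using portable masking.
int SignExtend12(int raw) { return (raw << (32 - 12)) >> (32 - 12); }

int main() {
  std::printf("%d(0x%x)\n", SignExtend12(0x800), 0x800);  // -2048(0x800)
  std::printf("%d(0x%x)\n", SignExtend12(0x7ff), 0x7ff);  // 2047(0x7ff)
}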
@@ -314,23 +324,20 @@ void Decoder::PrintPCOffs26(Instruction* instr) {
void Decoder::PrintOffs16(Instruction* instr) {
int offs = instr->Offs16Value();
- offs <<= (32 - kOffsLowBits);
- offs >>= (32 - kOffsLowBits);
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", offs << 2);
}
void Decoder::PrintOffs21(Instruction* instr) {
int offs = instr->Offs21Value();
- offs <<= (32 - kOffsLowBits - kOffs21HighBits);
- offs >>= (32 - kOffsLowBits - kOffs21HighBits);
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", offs << 2);
}
void Decoder::PrintOffs26(Instruction* instr) {
int offs = instr->Offs26Value();
- offs <<= (32 - kOffsLowBits - kOffs26HighBits);
- offs >>= (32 - kOffsLowBits - kOffs26HighBits);
- out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", offs << 2);
}
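In the PrintOffs* helpers above, the value is shifted left by 2 before printing and is now shown in hex; the shift is consistent with LoongArch branch-offset fields counting 4-byte instructions rather than bytes (an inference from the code, not stated in the patch). A tiny hedged illustration:

#include <cstdio>

// Hedged illustration: a branch-offset field that counts instructions maps
// to a byte offset by shifting left by 2, which is what the decoder prints.
int main() {
  int offs16_field = 0x10;                   // 16 instructions forward
  std::printf("0x%x\n", offs16_field << 2);  // prints 0x40 (64 bytes)
}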
// Handle all register based formatting in this function to reduce the
@@ -541,9 +548,16 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
break;
}
case 'x': {
- DCHECK(STRING_STARTS_WITH(format, "xi12"));
- PrintXi12(instr);
- return 4;
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "xi20"));
+ PrintXi20(instr);
+ return 4;
+ } else if (format[3] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "xi12"));
+ PrintXi12(instr);
+ return 4;
+ }
+ break;
}
default:
UNREACHABLE();
@@ -587,53 +601,53 @@ int Decoder::DecodeBreakInstr(Instruction* instr) {
Format(instr, "break, code: 'code");
return kInstrSize;
}*/
- Format(instr, "break code: 'code");
+ Format(instr, "break code: 'code");
return kInstrSize;
} //===================================================
void Decoder::DecodeTypekOp6(Instruction* instr) {
switch (instr->Bits(31, 26) << 26) {
case ADDU16I_D:
- Format(instr, "addu16i.d 'rd, 'rj, 'si16");
+ Format(instr, "addu16i.d 'rd, 'rj, 'si16");
break;
case BEQZ:
- Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21");
+ Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21");
break;
case BNEZ:
- Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21");
+ Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21");
break;
case BCZ:
if (instr->Bit(8))
- Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21");
+ Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21");
else
- Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21");
+ Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21");
break;
case JIRL:
- Format(instr, "jirl 'rd, 'rj, 'offs16");
+ Format(instr, "jirl 'rd, 'rj, 'offs16");
break;
case B:
- Format(instr, "b 'offs26 -> 'pcoffs26");
+ Format(instr, "b 'offs26 -> 'pcoffs26");
break;
case BL:
- Format(instr, "bl 'offs26 -> 'pcoffs26");
+ Format(instr, "bl 'offs26 -> 'pcoffs26");
break;
case BEQ:
- Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16");
break;
case BNE:
- Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16");
break;
case BLT:
- Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16");
break;
case BGE:
- Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16");
break;
case BLTU:
- Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16");
break;
case BGEU:
- Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16");
break;
default:
UNREACHABLE();
@@ -643,22 +657,22 @@ void Decoder::DecodeTypekOp6(Instruction* instr) {
void Decoder::DecodeTypekOp7(Instruction* instr) {
switch (instr->Bits(31, 25) << 25) {
case LU12I_W:
- Format(instr, "lu12i.w 'rd, 'si20");
+ Format(instr, "lu12i.w 'rd, 'xi20");
break;
case LU32I_D:
- Format(instr, "lu32i.d 'rd, 'si20");
+ Format(instr, "lu32i.d 'rd, 'xi20");
break;
case PCADDI:
- Format(instr, "pcaddi 'rd, 'si20");
+ Format(instr, "pcaddi 'rd, 'xi20");
break;
case PCALAU12I:
- Format(instr, "pcalau12i 'rd, 'si20");
+ Format(instr, "pcalau12i 'rd, 'xi20");
break;
case PCADDU12I:
- Format(instr, "pcaddu12i 'rd, 'si20");
+ Format(instr, "pcaddu12i 'rd, 'xi20");
break;
case PCADDU18I:
- Format(instr, "pcaddu18i 'rd, 'si20");
+ Format(instr, "pcaddu18i 'rd, 'xi20");
break;
default:
UNREACHABLE();
@@ -668,28 +682,28 @@ void Decoder::DecodeTypekOp7(Instruction* instr) {
void Decoder::DecodeTypekOp8(Instruction* instr) {
switch (instr->Bits(31, 24) << 24) {
case LDPTR_W:
- Format(instr, "ldptr.w 'rd, 'rj, 'si14");
+ Format(instr, "ldptr.w 'rd, 'rj, 'si14");
break;
case STPTR_W:
- Format(instr, "stptr.w 'rd, 'rj, 'si14");
+ Format(instr, "stptr.w 'rd, 'rj, 'si14");
break;
case LDPTR_D:
- Format(instr, "ldptr.d 'rd, 'rj, 'si14");
+ Format(instr, "ldptr.d 'rd, 'rj, 'si14");
break;
case STPTR_D:
- Format(instr, "stptr.d 'rd, 'rj, 'si14");
+ Format(instr, "stptr.d 'rd, 'rj, 'si14");
break;
case LL_W:
- Format(instr, "ll.w 'rd, 'rj, 'si14");
+ Format(instr, "ll.w 'rd, 'rj, 'si14");
break;
case SC_W:
- Format(instr, "sc.w 'rd, 'rj, 'si14");
+ Format(instr, "sc.w 'rd, 'rj, 'si14");
break;
case LL_D:
- Format(instr, "ll.d 'rd, 'rj, 'si14");
+ Format(instr, "ll.d 'rd, 'rj, 'si14");
break;
case SC_D:
- Format(instr, "sc.d 'rd, 'rj, 'si14");
+ Format(instr, "sc.d 'rd, 'rj, 'si14");
break;
default:
UNREACHABLE();
@@ -701,87 +715,87 @@ void Decoder::DecodeTypekOp10(Instruction* instr) {
case BSTR_W: {
if (instr->Bit(21) != 0) {
if (instr->Bit(15) == 0) {
- Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw");
+ Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw");
} else {
- Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw");
+ Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw");
}
}
break;
}
case BSTRINS_D:
- Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd");
+ Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd");
break;
case BSTRPICK_D:
- Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd");
+ Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd");
break;
case SLTI:
- Format(instr, "slti 'rd, 'rj, 'si12");
+ Format(instr, "slti 'rd, 'rj, 'si12");
break;
case SLTUI:
- Format(instr, "sltui 'rd, 'rj, 'si12");
+ Format(instr, "sltui 'rd, 'rj, 'si12");
break;
case ADDI_W:
- Format(instr, "addi.w 'rd, 'rj, 'si12");
+ Format(instr, "addi.w 'rd, 'rj, 'si12");
break;
case ADDI_D:
- Format(instr, "addi.d 'rd, 'rj, 'si12");
+ Format(instr, "addi.d 'rd, 'rj, 'si12");
break;
case LU52I_D:
- Format(instr, "lu52i.d 'rd, 'rj, 'si12");
+ Format(instr, "lu52i.d 'rd, 'rj, 'xi12");
break;
case ANDI:
- Format(instr, "andi 'rd, 'rj, 'xi12");
+ Format(instr, "andi 'rd, 'rj, 'xi12");
break;
case ORI:
- Format(instr, "ori 'rd, 'rj, 'xi12");
+ Format(instr, "ori 'rd, 'rj, 'xi12");
break;
case XORI:
- Format(instr, "xori 'rd, 'rj, 'xi12");
+ Format(instr, "xori 'rd, 'rj, 'xi12");
break;
case LD_B:
- Format(instr, "ld.b 'rd, 'rj, 'si12");
+ Format(instr, "ld.b 'rd, 'rj, 'si12");
break;
case LD_H:
- Format(instr, "ld.h 'rd, 'rj, 'si12");
+ Format(instr, "ld.h 'rd, 'rj, 'si12");
break;
case LD_W:
- Format(instr, "ld.w 'rd, 'rj, 'si12");
+ Format(instr, "ld.w 'rd, 'rj, 'si12");
break;
case LD_D:
- Format(instr, "ld.d 'rd, 'rj, 'si12");
+ Format(instr, "ld.d 'rd, 'rj, 'si12");
break;
case ST_B:
- Format(instr, "st.b 'rd, 'rj, 'si12");
+ Format(instr, "st.b 'rd, 'rj, 'si12");
break;
case ST_H:
- Format(instr, "st.h 'rd, 'rj, 'si12");
+ Format(instr, "st.h 'rd, 'rj, 'si12");
break;
case ST_W:
- Format(instr, "st.w 'rd, 'rj, 'si12");
+ Format(instr, "st.w 'rd, 'rj, 'si12");
break;
case ST_D:
- Format(instr, "st.d 'rd, 'rj, 'si12");
+ Format(instr, "st.d 'rd, 'rj, 'si12");
break;
case LD_BU:
- Format(instr, "ld.bu 'rd, 'rj, 'si12");
+ Format(instr, "ld.bu 'rd, 'rj, 'si12");
break;
case LD_HU:
- Format(instr, "ld.hu 'rd, 'rj, 'si12");
+ Format(instr, "ld.hu 'rd, 'rj, 'si12");
break;
case LD_WU:
- Format(instr, "ld.wu 'rd, 'rj, 'si12");
+ Format(instr, "ld.wu 'rd, 'rj, 'si12");
break;
case FLD_S:
- Format(instr, "fld.s 'fd, 'rj, 'si12");
+ Format(instr, "fld.s 'fd, 'rj, 'si12");
break;
case FST_S:
- Format(instr, "fst.s 'fd, 'rj, 'si12");
+ Format(instr, "fst.s 'fd, 'rj, 'si12");
break;
case FLD_D:
- Format(instr, "fld.d 'fd, 'rj, 'si12");
+ Format(instr, "fld.d 'fd, 'rj, 'si12");
break;
case FST_D:
- Format(instr, "fst.d 'fd, 'rj, 'si12");
+ Format(instr, "fst.d 'fd, 'rj, 'si12");
break;
default:
UNREACHABLE();
@@ -791,16 +805,16 @@ void Decoder::DecodeTypekOp10(Instruction* instr) {
void Decoder::DecodeTypekOp12(Instruction* instr) {
switch (instr->Bits(31, 20) << 20) {
case FMADD_S:
- Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa");
+ Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa");
break;
case FMADD_D:
- Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa");
+ Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa");
break;
case FMSUB_S:
- Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa");
+ Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa");
break;
case FMSUB_D:
- Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa");
+ Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa");
break;
case FNMADD_S:
Format(instr, "fnmadd.s 'fd, 'fj, 'fk, 'fa");
@@ -817,67 +831,67 @@ void Decoder::DecodeTypekOp12(Instruction* instr) {
case FCMP_COND_S:
switch (instr->Bits(19, 15)) {
case CAF:
- Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk");
break;
case SAF:
- Format(instr, "fcmp.saf.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.saf.s fcc'cd, 'fj, 'fk");
break;
case CLT:
- Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk");
break;
case CEQ:
- Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk");
break;
case SEQ:
- Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk");
break;
case CLE:
- Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk");
break;
case SLE:
- Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk");
break;
case CUN:
- Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk");
break;
case SUN:
- Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk");
break;
case CULT:
- Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk");
break;
case SULT:
- Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk");
break;
case CUEQ:
- Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk");
break;
case SUEQ:
- Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk");
break;
case CULE:
- Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk");
break;
case SULE:
- Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk");
break;
case CNE:
- Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk");
break;
case SNE:
- Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk");
break;
case COR:
- Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk");
break;
case SOR:
- Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk");
break;
case CUNE:
- Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk");
break;
case SUNE:
- Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk");
break;
default:
UNREACHABLE();
@@ -886,74 +900,74 @@ void Decoder::DecodeTypekOp12(Instruction* instr) {
case FCMP_COND_D:
switch (instr->Bits(19, 15)) {
case CAF:
- Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk");
break;
case SAF:
- Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk");
break;
case CLT:
- Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk");
break;
case CEQ:
- Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk");
break;
case SEQ:
- Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk");
break;
case CLE:
- Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk");
break;
case SLE:
- Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk");
break;
case CUN:
- Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk");
break;
case SUN:
- Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk");
break;
case CULT:
- Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk");
break;
case SULT:
- Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk");
break;
case CUEQ:
- Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk");
break;
case SUEQ:
- Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk");
break;
case CULE:
- Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk");
break;
case SULE:
- Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk");
break;
case CNE:
- Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk");
break;
case SNE:
- Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk");
break;
case COR:
- Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk");
break;
case SOR:
- Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk");
break;
case CUNE:
- Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk");
break;
case SUNE:
- Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk");
+ Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk");
break;
default:
UNREACHABLE();
}
break;
case FSEL:
- Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca");
+ Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca");
break;
default:
UNREACHABLE();
@@ -964,42 +978,42 @@ void Decoder::DecodeTypekOp14(Instruction* instr) {
switch (instr->Bits(31, 18) << 18) {
case ALSL:
if (instr->Bit(17))
- Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2");
+ Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2");
else
- Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2");
+ Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2");
break;
case BYTEPICK_W:
- Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2");
+ Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2");
break;
case BYTEPICK_D:
- Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3");
+ Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3");
break;
case ALSL_D:
- Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2");
+ Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2");
break;
case SLLI:
if (instr->Bit(16))
- Format(instr, "slli.d 'rd, 'rj, 'ui6");
+ Format(instr, "slli.d 'rd, 'rj, 'ui6");
else
- Format(instr, "slli.w 'rd, 'rj, 'ui5");
+ Format(instr, "slli.w 'rd, 'rj, 'ui5");
break;
case SRLI:
if (instr->Bit(16))
- Format(instr, "srli.d 'rd, 'rj, 'ui6");
+ Format(instr, "srli.d 'rd, 'rj, 'ui6");
else
- Format(instr, "srli.w 'rd, 'rj, 'ui5");
+ Format(instr, "srli.w 'rd, 'rj, 'ui5");
break;
case SRAI:
if (instr->Bit(16))
- Format(instr, "srai.d 'rd, 'rj, 'ui6");
+ Format(instr, "srai.d 'rd, 'rj, 'ui6");
else
- Format(instr, "srai.w 'rd, 'rj, 'ui5");
+ Format(instr, "srai.w 'rd, 'rj, 'ui5");
break;
case ROTRI:
if (instr->Bit(16))
- Format(instr, "rotri.d 'rd, 'rj, 'ui6");
+ Format(instr, "rotri.d 'rd, 'rj, 'ui6");
else
- Format(instr, "rotri.w 'rd, 'rj, 'ui5");
+ Format(instr, "rotri.w 'rd, 'rj, 'ui5");
break;
default:
UNREACHABLE();
@@ -1009,213 +1023,213 @@ void Decoder::DecodeTypekOp14(Instruction* instr) {
int Decoder::DecodeTypekOp17(Instruction* instr) {
switch (instr->Bits(31, 15) << 15) {
case ADD_W:
- Format(instr, "add.w 'rd, 'rj, 'rk");
+ Format(instr, "add.w 'rd, 'rj, 'rk");
break;
case ADD_D:
- Format(instr, "add.d 'rd, 'rj, 'rk");
+ Format(instr, "add.d 'rd, 'rj, 'rk");
break;
case SUB_W:
- Format(instr, "sub.w 'rd, 'rj, 'rk");
+ Format(instr, "sub.w 'rd, 'rj, 'rk");
break;
case SUB_D:
- Format(instr, "sub.d 'rd, 'rj, 'rk");
+ Format(instr, "sub.d 'rd, 'rj, 'rk");
break;
case SLT:
- Format(instr, "slt 'rd, 'rj, 'rk");
+ Format(instr, "slt 'rd, 'rj, 'rk");
break;
case SLTU:
- Format(instr, "sltu 'rd, 'rj, 'rk");
+ Format(instr, "sltu 'rd, 'rj, 'rk");
break;
case MASKEQZ:
- Format(instr, "maskeqz 'rd, 'rj, 'rk");
+ Format(instr, "maskeqz 'rd, 'rj, 'rk");
break;
case MASKNEZ:
- Format(instr, "masknez 'rd, 'rj, 'rk");
+ Format(instr, "masknez 'rd, 'rj, 'rk");
break;
case NOR:
- Format(instr, "nor 'rd, 'rj, 'rk");
+ Format(instr, "nor 'rd, 'rj, 'rk");
break;
case AND:
- Format(instr, "and 'rd, 'rj, 'rk");
+ Format(instr, "and 'rd, 'rj, 'rk");
break;
case OR:
- Format(instr, "or 'rd, 'rj, 'rk");
+ Format(instr, "or 'rd, 'rj, 'rk");
break;
case XOR:
- Format(instr, "xor 'rd, 'rj, 'rk");
+ Format(instr, "xor 'rd, 'rj, 'rk");
break;
case ORN:
- Format(instr, "orn 'rd, 'rj, 'rk");
+ Format(instr, "orn 'rd, 'rj, 'rk");
break;
case ANDN:
- Format(instr, "andn 'rd, 'rj, 'rk");
+ Format(instr, "andn 'rd, 'rj, 'rk");
break;
case SLL_W:
- Format(instr, "sll.w 'rd, 'rj, 'rk");
+ Format(instr, "sll.w 'rd, 'rj, 'rk");
break;
case SRL_W:
- Format(instr, "srl.w 'rd, 'rj, 'rk");
+ Format(instr, "srl.w 'rd, 'rj, 'rk");
break;
case SRA_W:
- Format(instr, "sra.w 'rd, 'rj, 'rk");
+ Format(instr, "sra.w 'rd, 'rj, 'rk");
break;
case SLL_D:
- Format(instr, "sll.d 'rd, 'rj, 'rk");
+ Format(instr, "sll.d 'rd, 'rj, 'rk");
break;
case SRL_D:
- Format(instr, "srl.d 'rd, 'rj, 'rk");
+ Format(instr, "srl.d 'rd, 'rj, 'rk");
break;
case SRA_D:
- Format(instr, "sra.d 'rd, 'rj, 'rk");
+ Format(instr, "sra.d 'rd, 'rj, 'rk");
break;
case ROTR_D:
- Format(instr, "rotr.d 'rd, 'rj, 'rk");
+ Format(instr, "rotr.d 'rd, 'rj, 'rk");
break;
case ROTR_W:
- Format(instr, "rotr.w 'rd, 'rj, 'rk");
+ Format(instr, "rotr.w 'rd, 'rj, 'rk");
break;
case MUL_W:
- Format(instr, "mul.w 'rd, 'rj, 'rk");
+ Format(instr, "mul.w 'rd, 'rj, 'rk");
break;
case MULH_W:
- Format(instr, "mulh.w 'rd, 'rj, 'rk");
+ Format(instr, "mulh.w 'rd, 'rj, 'rk");
break;
case MULH_WU:
- Format(instr, "mulh.wu 'rd, 'rj, 'rk");
+ Format(instr, "mulh.wu 'rd, 'rj, 'rk");
break;
case MUL_D:
- Format(instr, "mul.d 'rd, 'rj, 'rk");
+ Format(instr, "mul.d 'rd, 'rj, 'rk");
break;
case MULH_D:
- Format(instr, "mulh.d 'rd, 'rj, 'rk");
+ Format(instr, "mulh.d 'rd, 'rj, 'rk");
break;
case MULH_DU:
- Format(instr, "mulh.du 'rd, 'rj, 'rk");
+ Format(instr, "mulh.du 'rd, 'rj, 'rk");
break;
case MULW_D_W:
Format(instr, "mulw.d.w 'rd, 'rj, 'rk");
break;
case MULW_D_WU:
- Format(instr, "mulw.d.wu 'rd, 'rj, 'rk");
+ Format(instr, "mulw.d.wu 'rd, 'rj, 'rk");
break;
case DIV_W:
- Format(instr, "div.w 'rd, 'rj, 'rk");
+ Format(instr, "div.w 'rd, 'rj, 'rk");
break;
case MOD_W:
- Format(instr, "mod.w 'rd, 'rj, 'rk");
+ Format(instr, "mod.w 'rd, 'rj, 'rk");
break;
case DIV_WU:
- Format(instr, "div.wu 'rd, 'rj, 'rk");
+ Format(instr, "div.wu 'rd, 'rj, 'rk");
break;
case MOD_WU:
- Format(instr, "mod.wu 'rd, 'rj, 'rk");
+ Format(instr, "mod.wu 'rd, 'rj, 'rk");
break;
case DIV_D:
- Format(instr, "div.d 'rd, 'rj, 'rk");
+ Format(instr, "div.d 'rd, 'rj, 'rk");
break;
case MOD_D:
- Format(instr, "mod.d 'rd, 'rj, 'rk");
+ Format(instr, "mod.d 'rd, 'rj, 'rk");
break;
case DIV_DU:
- Format(instr, "div.du 'rd, 'rj, 'rk");
+ Format(instr, "div.du 'rd, 'rj, 'rk");
break;
case MOD_DU:
- Format(instr, "mod.du 'rd, 'rj, 'rk");
+ Format(instr, "mod.du 'rd, 'rj, 'rk");
break;
case BREAK:
return DecodeBreakInstr(instr);
case FADD_S:
- Format(instr, "fadd.s 'fd, 'fj, 'fk");
+ Format(instr, "fadd.s 'fd, 'fj, 'fk");
break;
case FADD_D:
- Format(instr, "fadd.d 'fd, 'fj, 'fk");
+ Format(instr, "fadd.d 'fd, 'fj, 'fk");
break;
case FSUB_S:
- Format(instr, "fsub.s 'fd, 'fj, 'fk");
+ Format(instr, "fsub.s 'fd, 'fj, 'fk");
break;
case FSUB_D:
- Format(instr, "fsub.d 'fd, 'fj, 'fk");
+ Format(instr, "fsub.d 'fd, 'fj, 'fk");
break;
case FMUL_S:
- Format(instr, "fmul.s 'fd, 'fj, 'fk");
+ Format(instr, "fmul.s 'fd, 'fj, 'fk");
break;
case FMUL_D:
- Format(instr, "fmul.d 'fd, 'fj, 'fk");
+ Format(instr, "fmul.d 'fd, 'fj, 'fk");
break;
case FDIV_S:
- Format(instr, "fdiv.s 'fd, 'fj, 'fk");
+ Format(instr, "fdiv.s 'fd, 'fj, 'fk");
break;
case FDIV_D:
- Format(instr, "fdiv.d 'fd, 'fj, 'fk");
+ Format(instr, "fdiv.d 'fd, 'fj, 'fk");
break;
case FMAX_S:
- Format(instr, "fmax.s 'fd, 'fj, 'fk");
+ Format(instr, "fmax.s 'fd, 'fj, 'fk");
break;
case FMAX_D:
- Format(instr, "fmax.d 'fd, 'fj, 'fk");
+ Format(instr, "fmax.d 'fd, 'fj, 'fk");
break;
case FMIN_S:
- Format(instr, "fmin.s 'fd, 'fj, 'fk");
+ Format(instr, "fmin.s 'fd, 'fj, 'fk");
break;
case FMIN_D:
- Format(instr, "fmin.d 'fd, 'fj, 'fk");
+ Format(instr, "fmin.d 'fd, 'fj, 'fk");
break;
case FMAXA_S:
- Format(instr, "fmaxa.s 'fd, 'fj, 'fk");
+ Format(instr, "fmaxa.s 'fd, 'fj, 'fk");
break;
case FMAXA_D:
- Format(instr, "fmaxa.d 'fd, 'fj, 'fk");
+ Format(instr, "fmaxa.d 'fd, 'fj, 'fk");
break;
case FMINA_S:
- Format(instr, "fmina.s 'fd, 'fj, 'fk");
+ Format(instr, "fmina.s 'fd, 'fj, 'fk");
break;
case FMINA_D:
- Format(instr, "fmina.d 'fd, 'fj, 'fk");
+ Format(instr, "fmina.d 'fd, 'fj, 'fk");
break;
case LDX_B:
- Format(instr, "ldx.b 'rd, 'rj, 'rk");
+ Format(instr, "ldx.b 'rd, 'rj, 'rk");
break;
case LDX_H:
- Format(instr, "ldx.h 'rd, 'rj, 'rk");
+ Format(instr, "ldx.h 'rd, 'rj, 'rk");
break;
case LDX_W:
- Format(instr, "ldx.w 'rd, 'rj, 'rk");
+ Format(instr, "ldx.w 'rd, 'rj, 'rk");
break;
case LDX_D:
- Format(instr, "ldx.d 'rd, 'rj, 'rk");
+ Format(instr, "ldx.d 'rd, 'rj, 'rk");
break;
case STX_B:
- Format(instr, "stx.b 'rd, 'rj, 'rk");
+ Format(instr, "stx.b 'rd, 'rj, 'rk");
break;
case STX_H:
- Format(instr, "stx.h 'rd, 'rj, 'rk");
+ Format(instr, "stx.h 'rd, 'rj, 'rk");
break;
case STX_W:
- Format(instr, "stx.w 'rd, 'rj, 'rk");
+ Format(instr, "stx.w 'rd, 'rj, 'rk");
break;
case STX_D:
- Format(instr, "stx.d 'rd, 'rj, 'rk");
+ Format(instr, "stx.d 'rd, 'rj, 'rk");
break;
case LDX_BU:
- Format(instr, "ldx.bu 'rd, 'rj, 'rk");
+ Format(instr, "ldx.bu 'rd, 'rj, 'rk");
break;
case LDX_HU:
- Format(instr, "ldx.hu 'rd, 'rj, 'rk");
+ Format(instr, "ldx.hu 'rd, 'rj, 'rk");
break;
case LDX_WU:
- Format(instr, "ldx.wu 'rd, 'rj, 'rk");
+ Format(instr, "ldx.wu 'rd, 'rj, 'rk");
break;
case FLDX_S:
- Format(instr, "fldx.s 'fd, 'rj, 'rk");
+ Format(instr, "fldx.s 'fd, 'rj, 'rk");
break;
case FLDX_D:
- Format(instr, "fldx.d 'fd, 'rj, 'rk");
+ Format(instr, "fldx.d 'fd, 'rj, 'rk");
break;
case FSTX_S:
- Format(instr, "fstx.s 'fd, 'rj, 'rk");
+ Format(instr, "fstx.s 'fd, 'rj, 'rk");
break;
case FSTX_D:
- Format(instr, "fstx.d 'fd, 'rj, 'rk");
+ Format(instr, "fstx.d 'fd, 'rj, 'rk");
break;
case AMSWAP_W:
Format(instr, "amswap.w 'rd, 'rk, 'rj");
@@ -1224,40 +1238,40 @@ int Decoder::DecodeTypekOp17(Instruction* instr) {
Format(instr, "amswap.d 'rd, 'rk, 'rj");
break;
case AMADD_W:
- Format(instr, "amadd.w 'rd, 'rk, 'rj");
+ Format(instr, "amadd.w 'rd, 'rk, 'rj");
break;
case AMADD_D:
- Format(instr, "amadd.d 'rd, 'rk, 'rj");
+ Format(instr, "amadd.d 'rd, 'rk, 'rj");
break;
case AMAND_W:
- Format(instr, "amand.w 'rd, 'rk, 'rj");
+ Format(instr, "amand.w 'rd, 'rk, 'rj");
break;
case AMAND_D:
- Format(instr, "amand.d 'rd, 'rk, 'rj");
+ Format(instr, "amand.d 'rd, 'rk, 'rj");
break;
case AMOR_W:
- Format(instr, "amor.w 'rd, 'rk, 'rj");
+ Format(instr, "amor.w 'rd, 'rk, 'rj");
break;
case AMOR_D:
- Format(instr, "amor.d 'rd, 'rk, 'rj");
+ Format(instr, "amor.d 'rd, 'rk, 'rj");
break;
case AMXOR_W:
- Format(instr, "amxor.w 'rd, 'rk, 'rj");
+ Format(instr, "amxor.w 'rd, 'rk, 'rj");
break;
case AMXOR_D:
- Format(instr, "amxor.d 'rd, 'rk, 'rj");
+ Format(instr, "amxor.d 'rd, 'rk, 'rj");
break;
case AMMAX_W:
- Format(instr, "ammax.w 'rd, 'rk, 'rj");
+ Format(instr, "ammax.w 'rd, 'rk, 'rj");
break;
case AMMAX_D:
- Format(instr, "ammax.d 'rd, 'rk, 'rj");
+ Format(instr, "ammax.d 'rd, 'rk, 'rj");
break;
case AMMIN_W:
- Format(instr, "ammin.w 'rd, 'rk, 'rj");
+ Format(instr, "ammin.w 'rd, 'rk, 'rj");
break;
case AMMIN_D:
- Format(instr, "ammin.d 'rd, 'rk, 'rj");
+ Format(instr, "ammin.d 'rd, 'rk, 'rj");
break;
case AMMAX_WU:
Format(instr, "ammax.wu 'rd, 'rk, 'rj");
@@ -1272,76 +1286,76 @@ int Decoder::DecodeTypekOp17(Instruction* instr) {
Format(instr, "ammin.du 'rd, 'rk, 'rj");
break;
case AMSWAP_DB_W:
- Format(instr, "amswap_db.w 'rd, 'rk, 'rj");
+ Format(instr, "amswap_db.w 'rd, 'rk, 'rj");
break;
case AMSWAP_DB_D:
- Format(instr, "amswap_db.d 'rd, 'rk, 'rj");
+ Format(instr, "amswap_db.d 'rd, 'rk, 'rj");
break;
case AMADD_DB_W:
- Format(instr, "amadd_db.w 'rd, 'rk, 'rj");
+ Format(instr, "amadd_db.w 'rd, 'rk, 'rj");
break;
case AMADD_DB_D:
- Format(instr, "amadd_db.d 'rd, 'rk, 'rj");
+ Format(instr, "amadd_db.d 'rd, 'rk, 'rj");
break;
case AMAND_DB_W:
- Format(instr, "amand_db.w 'rd, 'rk, 'rj");
+ Format(instr, "amand_db.w 'rd, 'rk, 'rj");
break;
case AMAND_DB_D:
- Format(instr, "amand_db.d 'rd, 'rk, 'rj");
+ Format(instr, "amand_db.d 'rd, 'rk, 'rj");
break;
case AMOR_DB_W:
- Format(instr, "amor_db.w 'rd, 'rk, 'rj");
+ Format(instr, "amor_db.w 'rd, 'rk, 'rj");
break;
case AMOR_DB_D:
- Format(instr, "amor_db.d 'rd, 'rk, 'rj");
+ Format(instr, "amor_db.d 'rd, 'rk, 'rj");
break;
case AMXOR_DB_W:
- Format(instr, "amxor_db.w 'rd, 'rk, 'rj");
+ Format(instr, "amxor_db.w 'rd, 'rk, 'rj");
break;
case AMXOR_DB_D:
- Format(instr, "amxor_db.d 'rd, 'rk, 'rj");
+ Format(instr, "amxor_db.d 'rd, 'rk, 'rj");
break;
case AMMAX_DB_W:
- Format(instr, "ammax_db.w 'rd, 'rk, 'rj");
+ Format(instr, "ammax_db.w 'rd, 'rk, 'rj");
break;
case AMMAX_DB_D:
- Format(instr, "ammax_db.d 'rd, 'rk, 'rj");
+ Format(instr, "ammax_db.d 'rd, 'rk, 'rj");
break;
case AMMIN_DB_W:
- Format(instr, "ammin_db.w 'rd, 'rk, 'rj");
+ Format(instr, "ammin_db.w 'rd, 'rk, 'rj");
break;
case AMMIN_DB_D:
- Format(instr, "ammin_db.d 'rd, 'rk, 'rj");
+ Format(instr, "ammin_db.d 'rd, 'rk, 'rj");
break;
case AMMAX_DB_WU:
- Format(instr, "ammax_db.wu 'rd, 'rk, 'rj");
+ Format(instr, "ammax_db.wu 'rd, 'rk, 'rj");
break;
case AMMAX_DB_DU:
- Format(instr, "ammax_db.du 'rd, 'rk, 'rj");
+ Format(instr, "ammax_db.du 'rd, 'rk, 'rj");
break;
case AMMIN_DB_WU:
- Format(instr, "ammin_db.wu 'rd, 'rk, 'rj");
+ Format(instr, "ammin_db.wu 'rd, 'rk, 'rj");
break;
case AMMIN_DB_DU:
- Format(instr, "ammin_db.du 'rd, 'rk, 'rj");
+ Format(instr, "ammin_db.du 'rd, 'rk, 'rj");
break;
case DBAR:
- Format(instr, "dbar 'hint15");
+ Format(instr, "dbar 'hint15");
break;
case IBAR:
- Format(instr, "ibar 'hint15");
+ Format(instr, "ibar 'hint15");
break;
case FSCALEB_S:
- Format(instr, "fscaleb.s 'fd, 'fj, 'fk");
+ Format(instr, "fscaleb.s 'fd, 'fj, 'fk");
break;
case FSCALEB_D:
- Format(instr, "fscaleb.d 'fd, 'fj, 'fk");
+ Format(instr, "fscaleb.d 'fd, 'fj, 'fk");
break;
case FCOPYSIGN_S:
- Format(instr, "fcopysign.s 'fd, 'fj, 'fk");
+ Format(instr, "fcopysign.s 'fd, 'fj, 'fk");
break;
case FCOPYSIGN_D:
- Format(instr, "fcopysign.d 'fd, 'fj, 'fk");
+ Format(instr, "fcopysign.d 'fd, 'fj, 'fk");
break;
default:
UNREACHABLE();
@@ -1352,40 +1366,40 @@ int Decoder::DecodeTypekOp17(Instruction* instr) {
void Decoder::DecodeTypekOp22(Instruction* instr) {
switch (instr->Bits(31, 10) << 10) {
case CLZ_W:
- Format(instr, "clz.w 'rd, 'rj");
+ Format(instr, "clz.w 'rd, 'rj");
break;
case CTZ_W:
- Format(instr, "ctz.w 'rd, 'rj");
+ Format(instr, "ctz.w 'rd, 'rj");
break;
case CLZ_D:
- Format(instr, "clz.d 'rd, 'rj");
+ Format(instr, "clz.d 'rd, 'rj");
break;
case CTZ_D:
- Format(instr, "ctz.d 'rd, 'rj");
+ Format(instr, "ctz.d 'rd, 'rj");
break;
case REVB_2H:
- Format(instr, "revb.2h 'rd, 'rj");
+ Format(instr, "revb.2h 'rd, 'rj");
break;
case REVB_4H:
- Format(instr, "revb.4h 'rd, 'rj");
+ Format(instr, "revb.4h 'rd, 'rj");
break;
case REVB_2W:
- Format(instr, "revb.2w 'rd, 'rj");
+ Format(instr, "revb.2w 'rd, 'rj");
break;
case REVB_D:
- Format(instr, "revb.d 'rd, 'rj");
+ Format(instr, "revb.d 'rd, 'rj");
break;
case REVH_2W:
- Format(instr, "revh.2w 'rd, 'rj");
+ Format(instr, "revh.2w 'rd, 'rj");
break;
case REVH_D:
- Format(instr, "revh.d 'rd, 'rj");
+ Format(instr, "revh.d 'rd, 'rj");
break;
case BITREV_4B:
- Format(instr, "bitrev.4b 'rd, 'rj");
+ Format(instr, "bitrev.4b 'rd, 'rj");
break;
case BITREV_8B:
- Format(instr, "bitrev.8b 'rd, 'rj");
+ Format(instr, "bitrev.8b 'rd, 'rj");
break;
case BITREV_W:
Format(instr, "bitrev.w 'rd, 'rj");
@@ -1394,58 +1408,58 @@ void Decoder::DecodeTypekOp22(Instruction* instr) {
Format(instr, "bitrev.d 'rd, 'rj");
break;
case EXT_W_B:
- Format(instr, "ext.w.b 'rd, 'rj");
+ Format(instr, "ext.w.b 'rd, 'rj");
break;
case EXT_W_H:
- Format(instr, "ext.w.h 'rd, 'rj");
+ Format(instr, "ext.w.h 'rd, 'rj");
break;
case FABS_S:
- Format(instr, "fabs.s 'fd, 'fj");
+ Format(instr, "fabs.s 'fd, 'fj");
break;
case FABS_D:
- Format(instr, "fabs.d 'fd, 'fj");
+ Format(instr, "fabs.d 'fd, 'fj");
break;
case FNEG_S:
- Format(instr, "fneg.s 'fd, 'fj");
+ Format(instr, "fneg.s 'fd, 'fj");
break;
case FNEG_D:
- Format(instr, "fneg.d 'fd, 'fj");
+ Format(instr, "fneg.d 'fd, 'fj");
break;
case FSQRT_S:
- Format(instr, "fsqrt.s 'fd, 'fj");
+ Format(instr, "fsqrt.s 'fd, 'fj");
break;
case FSQRT_D:
- Format(instr, "fsqrt.d 'fd, 'fj");
+ Format(instr, "fsqrt.d 'fd, 'fj");
break;
case FMOV_S:
- Format(instr, "fmov.s 'fd, 'fj");
+ Format(instr, "fmov.s 'fd, 'fj");
break;
case FMOV_D:
- Format(instr, "fmov.d 'fd, 'fj");
+ Format(instr, "fmov.d 'fd, 'fj");
break;
case MOVGR2FR_W:
- Format(instr, "movgr2fr.w 'fd, 'rj");
+ Format(instr, "movgr2fr.w 'fd, 'rj");
break;
case MOVGR2FR_D:
- Format(instr, "movgr2fr.d 'fd, 'rj");
+ Format(instr, "movgr2fr.d 'fd, 'rj");
break;
case MOVGR2FRH_W:
- Format(instr, "movgr2frh.w 'fd, 'rj");
+ Format(instr, "movgr2frh.w 'fd, 'rj");
break;
case MOVFR2GR_S:
- Format(instr, "movfr2gr.s 'rd, 'fj");
+ Format(instr, "movfr2gr.s 'rd, 'fj");
break;
case MOVFR2GR_D:
- Format(instr, "movfr2gr.d 'rd, 'fj");
+ Format(instr, "movfr2gr.d 'rd, 'fj");
break;
case MOVFRH2GR_S:
- Format(instr, "movfrh2gr.s 'rd, 'fj");
+ Format(instr, "movfrh2gr.s 'rd, 'fj");
break;
case MOVGR2FCSR:
- Format(instr, "movgr2fcsr fcsr, 'rj");
+ Format(instr, "movgr2fcsr fcsr, 'rj");
break;
case MOVFCSR2GR:
- Format(instr, "movfcsr2gr 'rd, fcsr");
+ Format(instr, "movfcsr2gr 'rd, fcsr");
break;
case FCVT_S_D:
Format(instr, "fcvt.s.d 'fd, 'fj");
@@ -1454,82 +1468,82 @@ void Decoder::DecodeTypekOp22(Instruction* instr) {
Format(instr, "fcvt.d.s 'fd, 'fj");
break;
case FTINTRM_W_S:
- Format(instr, "ftintrm.w.s 'fd, 'fj");
+ Format(instr, "ftintrm.w.s 'fd, 'fj");
break;
case FTINTRM_W_D:
- Format(instr, "ftintrm.w.d 'fd, 'fj");
+ Format(instr, "ftintrm.w.d 'fd, 'fj");
break;
case FTINTRM_L_S:
- Format(instr, "ftintrm.l.s 'fd, 'fj");
+ Format(instr, "ftintrm.l.s 'fd, 'fj");
break;
case FTINTRM_L_D:
- Format(instr, "ftintrm.l.d 'fd, 'fj");
+ Format(instr, "ftintrm.l.d 'fd, 'fj");
break;
case FTINTRP_W_S:
- Format(instr, "ftintrp.w.s 'fd, 'fj");
+ Format(instr, "ftintrp.w.s 'fd, 'fj");
break;
case FTINTRP_W_D:
- Format(instr, "ftintrp.w.d 'fd, 'fj");
+ Format(instr, "ftintrp.w.d 'fd, 'fj");
break;
case FTINTRP_L_S:
- Format(instr, "ftintrp.l.s 'fd, 'fj");
+ Format(instr, "ftintrp.l.s 'fd, 'fj");
break;
case FTINTRP_L_D:
- Format(instr, "ftintrp.l.d 'fd, 'fj");
+ Format(instr, "ftintrp.l.d 'fd, 'fj");
break;
case FTINTRZ_W_S:
- Format(instr, "ftintrz.w.s 'fd, 'fj");
+ Format(instr, "ftintrz.w.s 'fd, 'fj");
break;
case FTINTRZ_W_D:
- Format(instr, "ftintrz.w.d 'fd, 'fj");
+ Format(instr, "ftintrz.w.d 'fd, 'fj");
break;
case FTINTRZ_L_S:
- Format(instr, "ftintrz.l.s 'fd, 'fj");
+ Format(instr, "ftintrz.l.s 'fd, 'fj");
break;
case FTINTRZ_L_D:
- Format(instr, "ftintrz.l.d 'fd, 'fj");
+ Format(instr, "ftintrz.l.d 'fd, 'fj");
break;
case FTINTRNE_W_S:
- Format(instr, "ftintrne.w.s 'fd, 'fj");
+ Format(instr, "ftintrne.w.s 'fd, 'fj");
break;
case FTINTRNE_W_D:
- Format(instr, "ftintrne.w.d 'fd, 'fj");
+ Format(instr, "ftintrne.w.d 'fd, 'fj");
break;
case FTINTRNE_L_S:
- Format(instr, "ftintrne.l.s 'fd, 'fj");
+ Format(instr, "ftintrne.l.s 'fd, 'fj");
break;
case FTINTRNE_L_D:
- Format(instr, "ftintrne.l.d 'fd, 'fj");
+ Format(instr, "ftintrne.l.d 'fd, 'fj");
break;
case FTINT_W_S:
- Format(instr, "ftint.w.s 'fd, 'fj");
+ Format(instr, "ftint.w.s 'fd, 'fj");
break;
case FTINT_W_D:
- Format(instr, "ftint.w.d 'fd, 'fj");
+ Format(instr, "ftint.w.d 'fd, 'fj");
break;
case FTINT_L_S:
- Format(instr, "ftint.l.s 'fd, 'fj");
+ Format(instr, "ftint.l.s 'fd, 'fj");
break;
case FTINT_L_D:
- Format(instr, "ftint.l.d 'fd, 'fj");
+ Format(instr, "ftint.l.d 'fd, 'fj");
break;
case FFINT_S_W:
- Format(instr, "ffint.s.w 'fd, 'fj");
+ Format(instr, "ffint.s.w 'fd, 'fj");
break;
case FFINT_S_L:
- Format(instr, "ffint.s.l 'fd, 'fj");
+ Format(instr, "ffint.s.l 'fd, 'fj");
break;
case FFINT_D_W:
- Format(instr, "ffint.d.w 'fd, 'fj");
+ Format(instr, "ffint.d.w 'fd, 'fj");
break;
case FFINT_D_L:
- Format(instr, "ffint.d.l 'fd, 'fj");
+ Format(instr, "ffint.d.l 'fd, 'fj");
break;
case FRINT_S:
- Format(instr, "frint.s 'fd, 'fj");
+ Format(instr, "frint.s 'fd, 'fj");
break;
case FRINT_D:
- Format(instr, "frint.d 'fd, 'fj");
+ Format(instr, "frint.d 'fd, 'fj");
break;
case MOVFR2CF:
Format(instr, "movfr2cf fcc'cd, 'fj");
@@ -1562,22 +1576,22 @@ void Decoder::DecodeTypekOp22(Instruction* instr) {
Format(instr, "fclass.d 'fd, 'fj");
break;
case FLOGB_S:
- Format(instr, "flogb.s 'fd, 'fj");
+ Format(instr, "flogb.s 'fd, 'fj");
break;
case FLOGB_D:
- Format(instr, "flogb.d 'fd, 'fj");
+ Format(instr, "flogb.d 'fd, 'fj");
break;
case CLO_W:
- Format(instr, "clo.w 'rd, 'rj");
+ Format(instr, "clo.w 'rd, 'rj");
break;
case CTO_W:
- Format(instr, "cto.w 'rd, 'rj");
+ Format(instr, "cto.w 'rd, 'rj");
break;
case CLO_D:
- Format(instr, "clo.d 'rd, 'rj");
+ Format(instr, "clo.d 'rd, 'rj");
break;
case CTO_D:
- Format(instr, "cto.d 'rd, 'rj");
+ Format(instr, "cto.d 'rd, 'rj");
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index a74548e949..de003a4a54 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -166,7 +166,9 @@ void TaggedIndex::TaggedIndexVerify(Isolate* isolate) {
}
void HeapObject::HeapObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::HeapObjectVerify(*this, isolate);
+ CHECK(IsHeapObject());
+ VerifyPointer(isolate, map(isolate));
+ CHECK(map(isolate).IsMap());
switch (map().instance_type()) {
#define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
@@ -293,6 +295,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
BigIntBase::cast(*this).BigIntBaseVerify(isolate);
break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -417,7 +420,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
Representation r = details.representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
@@ -652,7 +655,7 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
}
MaybeObject value = GetValue(descriptor);
HeapObject heap_object;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
CHECK_EQ(details.field_index(), expected_field_index);
CHECK(
value == MaybeObject::FromObject(FieldType::None()) ||
@@ -826,7 +829,24 @@ void JSBoundFunction::JSBoundFunctionVerify(Isolate* isolate) {
}
void JSFunction::JSFunctionVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSFunctionVerify(*this, isolate);
+ // Don't call TorqueGeneratedClassVerifiers::JSFunctionVerify here because the
+ // Torque class definition contains the field `prototype_or_initial_map` which
+ // may not be allocated.
+
+ // This assertion exists to encourage updating this verification function if
+ // new fields are added in the Torque class layout definition.
+ STATIC_ASSERT(JSFunction::TorqueGeneratedClass::kHeaderSize ==
+ 8 * kTaggedSize);
+
+ JSFunctionOrBoundFunctionVerify(isolate);
+ CHECK(IsJSFunction());
+ VerifyPointer(isolate, shared(isolate));
+ CHECK(shared(isolate).IsSharedFunctionInfo());
+ VerifyPointer(isolate, context(isolate, kRelaxedLoad));
+ CHECK(context(isolate, kRelaxedLoad).IsContext());
+ VerifyPointer(isolate, raw_feedback_cell(isolate));
+ CHECK(raw_feedback_cell(isolate).IsFeedbackCell());
+ VerifyPointer(isolate, raw_code(isolate));
CHECK(raw_code(isolate).IsCodeT());
CHECK(map(isolate).is_callable());
Handle<JSFunction> function(*this, isolate);
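The hand-written JSFunctionVerify above drops the Torque-generated verifier because prototype_or_initial_map may be unallocated, and the STATIC_ASSERT on the Torque header size acts as a tripwire: growing the Torque layout breaks the build until this verifier is revisited. A hedged sketch of the same tripwire pattern outside V8 (Layout and ValidateLayout are made-up names):

#include <cstddef>

struct Layout {
  void* a;
  void* b;
  void* c;  // optional field, deliberately not validated below
};

// Tripwire in the same spirit as the STATIC_ASSERT above: if Layout grows,
// this fails to compile, forcing a review of ValidateLayout().
static_assert(sizeof(Layout) == 3 * sizeof(void*),
              "update ValidateLayout() when adding fields to Layout");

bool ValidateLayout(const Layout& l) {
  return l.a != nullptr && l.b != nullptr;
}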
@@ -1229,8 +1249,9 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify(
}
}
}
+
void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::SmallOrderedHashMapVerify(*this, isolate);
+ CHECK(IsSmallOrderedHashMap());
SmallOrderedHashTable<SmallOrderedHashMap>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1243,7 +1264,7 @@ void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
}
void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::SmallOrderedHashSetVerify(*this, isolate);
+ CHECK(IsSmallOrderedHashSet());
SmallOrderedHashTable<SmallOrderedHashSet>::SmallOrderedHashTableVerify(
isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1257,8 +1278,7 @@ void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
Isolate* isolate) {
- TorqueGeneratedClassVerifiers::SmallOrderedNameDictionaryVerify(*this,
- isolate);
+ CHECK(IsSmallOrderedNameDictionary());
SmallOrderedHashTable<
SmallOrderedNameDictionary>::SmallOrderedHashTableVerify(isolate);
for (int entry = NumberOfElements(); entry < NumberOfDeletedElements();
@@ -1348,7 +1368,7 @@ void SwissNameDictionary::SwissNameDictionaryVerify(Isolate* isolate,
void JSRegExp::JSRegExpVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSRegExpVerify(*this, isolate);
- switch (TypeTag()) {
+ switch (type_tag()) {
case JSRegExp::ATOM: {
FixedArray arr = FixedArray::cast(data());
CHECK(arr.get(JSRegExp::kAtomPatternIndex).IsString());
@@ -1426,7 +1446,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
break;
}
default:
- CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
+ CHECK_EQ(JSRegExp::NOT_COMPILED, type_tag());
CHECK(data().IsUndefined(isolate));
break;
}
@@ -1654,9 +1674,20 @@ void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
#endif // V8_ENABLE_WEBASSEMBLY
void DataHandler::DataHandlerVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::DataHandlerVerify(*this, isolate);
+ // Don't call TorqueGeneratedClassVerifiers::DataHandlerVerify because the
+ // Torque definition of this class includes all of the optional fields.
+
+ // This assertion exists to encourage updating this verification function if
+ // new fields are added in the Torque class layout definition.
+ STATIC_ASSERT(DataHandler::kHeaderSize == 6 * kTaggedSize);
+
+ StructVerify(isolate);
+ CHECK(IsDataHandler());
+ VerifyPointer(isolate, smi_handler(isolate));
CHECK_IMPLIES(!smi_handler().IsSmi(),
IsStoreHandler() && smi_handler().IsCodeT());
+ VerifyPointer(isolate, validity_cell(isolate));
+ CHECK(validity_cell().IsSmi() || validity_cell().IsCell());
int data_count = data_field_count();
if (data_count >= 1) {
VerifyMaybeObjectField(isolate, kData1Offset);
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index f8e967dbf1..8a98a152db 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -234,6 +234,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case BIG_INT_BASE_TYPE:
BigIntBase::cast(*this).BigIntBasePrint(os);
break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -295,18 +296,18 @@ bool JSObject::PrintProperties(std::ostream& os) {
os << ": ";
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
- case kField: {
+ case PropertyLocation::kField: {
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
os << Brief(RawFastPropertyAt(field_index));
break;
}
- case kDescriptor:
+ case PropertyLocation::kDescriptor:
os << Brief(descs.GetStrongValue(i));
break;
}
os << " ";
details.PrintAsFastTo(os, PropertyDetails::kForProperties);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
int field_index = details.field_index();
if (field_index < nof_inobject_properties) {
os << ", location: in-object";
@@ -821,10 +822,15 @@ namespace {
void PrintContextWithHeader(std::ostream& os, Context context,
const char* type) {
context.PrintHeader(os, type);
- os << "\n - length: " << context.length();
+ os << "\n - type: " << context.map().instance_type();
os << "\n - scope_info: " << Brief(context.scope_info());
os << "\n - previous: " << Brief(context.unchecked_previous());
os << "\n - native_context: " << Brief(context.native_context());
+ if (context.scope_info().HasContextExtensionSlot()) {
+ os << "\n - extension: " << context.extension();
+ }
+ os << "\n - length: " << context.length();
+ os << "\n - elements:";
PrintFixedArrayElements(os, context);
os << "\n";
}
@@ -1336,24 +1342,15 @@ void JSDate::JSDatePrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
-void JSProxy::JSProxyPrint(std::ostream& os) {
- PrintHeader(os, "JSProxy");
- os << "\n - target: ";
- target().ShortPrint(os);
- os << "\n - handler: ";
- handler().ShortPrint(os);
- os << "\n";
-}
-
void JSSet::JSSetPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSet");
- os << " - table: " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
void JSMap::JSMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSMap");
- os << " - table: " << Brief(table());
+ os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
@@ -1373,18 +1370,6 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) {
JSCollectionIteratorPrint(os, "JSMapIterator");
}
-void WeakCell::WeakCellPrint(std::ostream& os) {
- PrintHeader(os, "WeakCell");
- os << "\n - finalization_registry: " << Brief(finalization_registry());
- os << "\n - target: " << Brief(target());
- os << "\n - holdings: " << Brief(holdings());
- os << "\n - prev: " << Brief(prev());
- os << "\n - next: " << Brief(next());
- os << "\n - unregister_token: " << Brief(unregister_token());
- os << "\n - key_list_prev: " << Brief(key_list_prev());
- os << "\n - key_list_next: " << Brief(key_list_next());
-}
-
void JSWeakRef::JSWeakRefPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakRef");
os << "\n - target: " << Brief(target());
@@ -1674,67 +1659,6 @@ void Foreign::ForeignPrint(std::ostream& os) {
os << "\n";
}
-void CallbackTask::CallbackTaskPrint(std::ostream& os) {
- PrintHeader(os, "CallbackTask");
- os << "\n - callback: " << Brief(callback());
- os << "\n - data: " << Brief(data());
- os << "\n";
-}
-
-void CallableTask::CallableTaskPrint(std::ostream& os) {
- PrintHeader(os, "CallableTask");
- os << "\n - context: " << Brief(context());
- os << "\n - callable: " << Brief(callable());
- os << "\n";
-}
-
-void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
- std::ostream& os) {
- PrintHeader(os, "PromiseFulfillReactionJobTask");
- os << "\n - argument: " << Brief(argument());
- os << "\n - context: " << Brief(context());
- os << "\n - handler: " << Brief(handler());
- os << "\n - promise_or_capability: " << Brief(promise_or_capability());
- os << "\n";
-}
-
-void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
- std::ostream& os) {
- PrintHeader(os, "PromiseRejectReactionJobTask");
- os << "\n - argument: " << Brief(argument());
- os << "\n - context: " << Brief(context());
- os << "\n - handler: " << Brief(handler());
- os << "\n - promise_or_capability: " << Brief(promise_or_capability());
- os << "\n";
-}
-
-void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
- std::ostream& os) {
- PrintHeader(os, "PromiseResolveThenableJobTask");
- os << "\n - context: " << Brief(context());
- os << "\n - promise_to_resolve: " << Brief(promise_to_resolve());
- os << "\n - then: " << Brief(then());
- os << "\n - thenable: " << Brief(thenable());
- os << "\n";
-}
-
-void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) {
- PrintHeader(os, "PromiseCapability");
- os << "\n - promise: " << Brief(promise());
- os << "\n - resolve: " << Brief(resolve());
- os << "\n - reject: " << Brief(reject());
- os << "\n";
-}
-
-void PromiseReaction::PromiseReactionPrint(std::ostream& os) {
- PrintHeader(os, "PromiseReaction");
- os << "\n - next: " << Brief(next());
- os << "\n - reject_handler: " << Brief(reject_handler());
- os << "\n - fulfill_handler: " << Brief(fulfill_handler());
- os << "\n - promise_or_capability: " << Brief(promise_or_capability());
- os << "\n";
-}
-
void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(std::ostream& os) {
PrintHeader(os, "AsyncGeneratorRequest");
const char* mode = "Invalid!";
@@ -1755,19 +1679,6 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(std::ostream& os) {
os << "\n";
}
-void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryPrint(
- std::ostream& os) {
- PrintHeader(os, "SourceTextModuleInfoEntry");
- os << "\n - export_name: " << Brief(export_name());
- os << "\n - local_name: " << Brief(local_name());
- os << "\n - import_name: " << Brief(import_name());
- os << "\n - module_request: " << module_request();
- os << "\n - cell_index: " << cell_index();
- os << "\n - beg_pos: " << beg_pos();
- os << "\n - end_pos: " << end_pos();
- os << "\n";
-}
-
static void PrintModuleFields(Module module, std::ostream& os) {
os << "\n - exports: " << Brief(module.exports());
os << "\n - status: " << module.status();
@@ -1798,14 +1709,6 @@ void SourceTextModule::SourceTextModulePrint(std::ostream& os) {
os << "\n";
}
-void SyntheticModule::SyntheticModulePrint(std::ostream& os) {
- PrintHeader(os, "SyntheticModule");
- PrintModuleFields(*this, os);
- os << "\n - export_names: " << Brief(export_names());
- os << "\n - name: " << Brief(name());
- os << "\n";
-}
-
void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSModuleNamespace");
os << "\n - module: " << Brief(module());
@@ -1822,13 +1725,6 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) {
os << "\n";
}
-void ClassPositions::ClassPositionsPrint(std::ostream& os) {
- PrintHeader(os, "ClassPositions");
- os << "\n - start position: " << start();
- os << "\n - end position: " << end();
- os << "\n";
-}
-
void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
std::ostream& os) {
PrintHeader(os, "ArrayBoilerplateDescription");
@@ -1837,15 +1733,6 @@ void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
os << "\n";
}
-void RegExpBoilerplateDescription::RegExpBoilerplateDescriptionPrint(
- std::ostream& os) {
- PrintHeader(os, "RegExpBoilerplateDescription");
- os << "\n - data: " << Brief(data());
- os << "\n - source: " << Brief(source());
- os << "\n - flags: " << flags();
- os << "\n";
-}
-
#if V8_ENABLE_WEBASSEMBLY
void AsmWasmData::AsmWasmDataPrint(std::ostream& os) {
PrintHeader(os, "AsmWasmData");
@@ -1899,10 +1786,11 @@ void WasmStruct::WasmStructPrint(std::ostream& os) {
os << Brief(base::ReadUnalignedValue<Object>(field_address));
break;
case wasm::kS128:
- case wasm::kBottom:
- case wasm::kVoid:
os << "UNIMPLEMENTED"; // TODO(7748): Implement.
break;
+ case wasm::kBottom:
+ case wasm::kVoid:
+ UNREACHABLE();
}
}
os << "\n";
@@ -1948,12 +1836,6 @@ void WasmArray::WasmArrayPrint(std::ostream& os) {
os << "\n";
}
-void WasmExceptionTag::WasmExceptionTagPrint(std::ostream& os) {
- PrintHeader(os, "WasmExceptionTag");
- os << "\n - index: " << index();
- os << "\n";
-}
-
void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "WasmInstanceObject");
os << "\n - module_object: " << Brief(module_object());
@@ -2045,15 +1927,6 @@ void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) {
os << "\n";
}
-void WasmTableObject::WasmTableObjectPrint(std::ostream& os) {
- PrintHeader(os, "WasmTableObject");
- os << "\n - elements: " << Brief(elements());
- os << "\n - maximum_length: " << Brief(maximum_length());
- os << "\n - dispatch_tables: " << Brief(dispatch_tables());
- os << "\n - raw_type: " << raw_type();
- os << "\n";
-}
-
void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmGlobalObject");
if (type().is_reference()) {
@@ -2069,21 +1942,6 @@ void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) {
os << "\n";
}
-void WasmMemoryObject::WasmMemoryObjectPrint(std::ostream& os) {
- PrintHeader(os, "WasmMemoryObject");
- os << "\n - array_buffer: " << Brief(array_buffer());
- os << "\n - maximum_pages: " << maximum_pages();
- os << "\n - instances: " << Brief(instances());
- os << "\n";
-}
-
-void WasmTagObject::WasmTagObjectPrint(std::ostream& os) {
- PrintHeader(os, "WasmTagObject");
- os << "\n - serialized_signature: " << Brief(serialized_signature());
- os << "\n - tag: " << Brief(tag());
- os << "\n";
-}
-
void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
std::ostream& os) {
PrintHeader(os, "WasmIndirectFunctionTable");
@@ -2141,13 +1999,6 @@ void StoreHandler::StoreHandlerPrint(std::ostream& os) {
os << "\n";
}
-void AccessorPair::AccessorPairPrint(std::ostream& os) {
- PrintHeader(os, "AccessorPair");
- os << "\n - getter: " << Brief(getter());
- os << "\n - setter: " << Brief(setter());
- os << "\n";
-}
-
void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) {
PrintHeader(os, "CallHandlerInfo");
os << "\n - callback: " << Brief(callback());
@@ -2431,18 +2282,6 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) {
os << "\n";
}
-void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) {
- PrintHeader(os, "StackFrameInfo");
- os << "\n - receiver_or_instance: " << Brief(receiver_or_instance());
- os << "\n - function: " << Brief(function());
- os << "\n - code_object: " << Brief(TorqueGeneratedClass::code_object());
- os << "\n - code_offset_or_source_position: "
- << code_offset_or_source_position();
- os << "\n - flags: " << flags();
- os << "\n - parameters: " << Brief(parameters());
- os << "\n";
-}
-
void PreparseData::PreparseDataPrint(std::ostream& os) {
PrintHeader(os, "PreparseData");
os << "\n - data_length: " << data_length();
@@ -2459,13 +2298,6 @@ void PreparseData::PreparseDataPrint(std::ostream& os) {
os << "\n";
}
-void InterpreterData::InterpreterDataPrint(std::ostream& os) {
- PrintHeader(os, "InterpreterData");
- os << "\n - bytecode_array: " << Brief(bytecode_array());
- os << "\n - interpreter_trampoline: " << Brief(interpreter_trampoline());
- os << "\n";
-}
-
template <HeapObjectReferenceType kRefType, typename StorageType>
void TaggedImpl<kRefType, StorageType>::Print() {
StdoutStream os;
@@ -2659,12 +2491,12 @@ void DescriptorArray::PrintDescriptorDetails(std::ostream& os,
details.PrintAsFastTo(os, mode);
os << " @ ";
switch (details.location()) {
- case kField: {
+ case PropertyLocation::kField: {
FieldType field_type = GetFieldType(descriptor);
field_type.PrintTo(os);
break;
}
- case kDescriptor:
+ case PropertyLocation::kDescriptor:
Object value = GetStrongValue(descriptor);
os << Brief(value);
if (value.IsAccessorPair()) {
diff --git a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
index c3977cbf3e..ed899d9212 100644
--- a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
+++ b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -134,6 +134,8 @@ class Decoder {
void DecodeVType(Instruction* instr);
void DecodeRvvIVV(Instruction* instr);
+ void DecodeRvvFVV(Instruction* instr);
+ void DecodeRvvFVF(Instruction* instr);
void DecodeRvvIVI(Instruction* instr);
void DecodeRvvIVX(Instruction* instr);
void DecodeRvvVL(Instruction* instr);
@@ -800,7 +802,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
UNREACHABLE();
}
- case 'v': { // 'vs1: Raw values from register fields
+ case 'v': {
if (format[1] == 'd') {
DCHECK(STRING_STARTS_WITH(format, "vd"));
PrintVd(instr);
@@ -1912,6 +1914,9 @@ void Decoder::DecodeRvvIVV(Instruction* instr) {
case RO_V_VSADD_VV:
Format(instr, "vsadd.vv 'vd, 'vs2, 'vs1'vm");
break;
+ case RO_V_VSADDU_VV:
+ Format(instr, "vsaddu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
case RO_V_VSUB_VV:
Format(instr, "vsub.vv 'vd, 'vs2, 'vs1'vm");
break;
@@ -1996,6 +2001,9 @@ void Decoder::DecodeRvvIVI(Instruction* instr) {
case RO_V_VSADD_VI:
Format(instr, "vsadd.vi 'vd, 'vs2, 'simm5'vm");
break;
+ case RO_V_VSADDU_VI:
+ Format(instr, "vsaddu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
case RO_V_VRSUB_VI:
Format(instr, "vrsub.vi 'vd, 'vs2, 'simm5'vm");
break;
@@ -2074,6 +2082,9 @@ void Decoder::DecodeRvvIVX(Instruction* instr) {
case RO_V_VSADD_VX:
Format(instr, "vsadd.vx 'vd, 'vs2, 'rs1'vm");
break;
+ case RO_V_VSADDU_VX:
+ Format(instr, "vsaddu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
case RO_V_VSUB_VX:
Format(instr, "vsub.vx 'vd, 'vs2, 'rs1'vm");
break;
@@ -2155,6 +2166,12 @@ void Decoder::DecodeRvvIVX(Instruction* instr) {
UNREACHABLE();
}
break;
+ case RO_V_VSLL_VX:
+ Format(instr, "vsll.vx 'vd, 'vs2, 'rs1");
+ break;
+ case RO_V_VSRL_VX:
+ Format(instr, "vsrl.vx 'vd, 'vs2, 'rs1");
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2205,13 +2222,118 @@ void Decoder::DecodeRvvMVX(Instruction* instr) {
}
}
+void Decoder::DecodeRvvFVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VFUNARY0:
+ switch (instr->Vs1Value()) {
+ case VFCVT_XU_F_V:
+ Format(instr, "vfcvt.xu.f.v 'vd, 'vs2'vm");
+ break;
+ case VFCVT_X_F_V:
+ Format(instr, "vfcvt.x.f.v 'vd, 'vs2'vm");
+ break;
+ case VFNCVT_F_F_W:
+ Format(instr, "vfncvt.f.f.w 'vd, 'vs2'vm");
+ break;
+ case VFCVT_F_X_V:
+ Format(instr, "vfcvt.f.x.v 'vd, 'vs2'vm");
+ break;
+ case VFCVT_F_XU_V:
+ Format(instr, "vfcvt.f.xu.v 'vd, 'vs2'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+ break;
+ case RO_V_VFUNARY1:
+ switch (instr->Vs1Value()) {
+ case VFCLASS_V:
+ Format(instr, "vfclass.v 'vd, 'vs2'vm");
+ break;
+ default:
+ break;
+ }
+ break;
+ case RO_V_VMFEQ_VV:
+ Format(instr, "vmfeq.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMFNE_VV:
+ Format(instr, "vmfne.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMFLT_VV:
+ Format(instr, "vmflt.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMFLE_VV:
+ Format(instr, "vmfle.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFMAX_VV:
+ Format(instr, "vfmax.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFMIN_VV:
+ Format(instr, "vfmin.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFSGNJ_VV:
+ Format(instr, "vfsgnj.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFSGNJN_VV:
+ if (instr->Vs1Value() == instr->Vs2Value()) {
+ Format(instr, "vneg.vv 'vd, 'vs1'vm");
+ } else {
+ Format(instr, "vfsgnjn.vv 'vd, 'vs2, 'vs1'vm");
+ }
+ break;
+ case RO_V_VFSGNJX_VV:
+ if (instr->Vs1Value() == instr->Vs2Value()) {
+ Format(instr, "vabs.vv 'vd, 'vs1'vm");
+ } else {
+ Format(instr, "vfsgnjx.vv 'vd, 'vs2, 'vs1'vm");
+ }
+ break;
+ case RO_V_VFADD_VV:
+ Format(instr, "vfadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFSUB_VV:
+ Format(instr, "vfsub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFDIV_VV:
+ Format(instr, "vfdiv.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VFMUL_VV:
+ Format(instr, "vfmul.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvFVF(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVF);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VFSGNJ_VF:
+ Format(instr, "vfsgnj.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFSGNJN_VF:
+ Format(instr, "vfsgnjn.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ case RO_V_VFSGNJX_VF:
+ Format(instr, "vfsgnjx.vf 'vd, 'vs2, 'fs1'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
void Decoder::DecodeVType(Instruction* instr) {
switch (instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask)) {
case OP_IVV:
DecodeRvvIVV(instr);
return;
case OP_FVV:
- UNSUPPORTED_RISCV();
+ DecodeRvvFVV(instr);
return;
case OP_MVV:
DecodeRvvMVV(instr);
@@ -2502,7 +2624,7 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // RISC-V does not have the concept of a byte register.
- //return "nobytereg";
+ // return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 2a0cf4ff02..d50767421a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -22,36 +22,6 @@
// This has to come after windows.h.
#include <versionhelpers.h> // For IsWindows8OrGreater().
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index ce0a8a4b3f..469a6538dc 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -1261,46 +1261,6 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfCPURegister(regop));
current += PrintRightAVXOperand(current);
break;
- case 0x51:
- AppendToBuffer("vsqrtsd %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x58:
- AppendToBuffer("vaddsd %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x59:
- AppendToBuffer("vmulsd %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x5A:
- AppendToBuffer("vcvtsd2ss %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x5C:
- AppendToBuffer("vsubsd %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x5D:
- AppendToBuffer("vminsd %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x5E:
- AppendToBuffer("vdivsd %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x5F:
- AppendToBuffer("vmaxsd %s,%s,", NameOfAVXRegister(regop),
- NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
case 0xF0:
AppendToBuffer("vlddqu %s,", NameOfAVXRegister(regop));
current += PrintRightAVXOperand(current);
@@ -1315,6 +1275,14 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfAVXRegister(vvvv));
current += PrintRightAVXOperand(current);
break;
+#define DISASM_SSE2_INSTRUCTION_LIST_SD(instruction, _1, _2, opcode) \
+ case 0x##opcode: \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
+ break;
+ SSE2_INSTRUCTION_LIST_SD(DISASM_SSE2_INSTRUCTION_LIST_SD)
+#undef DISASM_SSE2_INSTRUCTION_LIST_SD
default:
UnimplementedInstruction();
}
@@ -2843,9 +2811,9 @@ int DisassemblerX64::InstructionDecode(v8::base::Vector<char> out_buffer,
for (byte* bp = instr; bp < data; bp++) {
outp += v8::base::SNPrintF(out_buffer + outp, "%02x", *bp);
}
- // Indent instruction, leaving space for 9 bytes, i.e. 18 characters in hex.
- // 9-byte nop and rip-relative mov are (probably) the largest we emit.
- while (outp < 18) {
+ // Indent instruction, leaving space for 10 bytes, i.e. 20 characters in hex.
+ // 10-byte mov is (probably) the largest we emit.
+ while (outp < 20) {
outp += v8::base::SNPrintF(out_buffer + outp, " ");
}
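The hand-written vsqrtsd/vaddsd/.../vmaxsd cases deleted above are regenerated by expanding the SSE2_INSTRUCTION_LIST_SD X-macro through the new DISASM_SSE2_INSTRUCTION_LIST_SD case, so the opcode table is maintained in a single place. A minimal sketch of the same X-macro technique, using an illustrative list rather than the real V8 one:

#include <cstdio>

// Each entry supplies the mnemonic and the opcode byte exactly once.
#define DEMO_SSE2_LIST_SD(V) \
  V(addsd, _, _, 58)         \
  V(mulsd, _, _, 59)         \
  V(subsd, _, _, 5C)

void DisassembleScalarDouble(unsigned opcode) {
  switch (opcode) {
// Expand one case per list entry; the unused middle fields are ignored here.
#define DEMO_CASE(instruction, _1, _2, op) \
  case 0x##op:                             \
    std::printf("v" #instruction "\n");    \
    break;
    DEMO_SSE2_LIST_SD(DEMO_CASE)
#undef DEMO_CASE
    default:
      std::printf("unimplemented\n");
  }
}

Adding an instruction to the list then updates the assembler and the disassembler together instead of requiring a new hand-written case in each.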
diff --git a/deps/v8/src/execution/arguments-inl.h b/deps/v8/src/execution/arguments-inl.h
index 0be2325837..2f69cd7adc 100644
--- a/deps/v8/src/execution/arguments-inl.h
+++ b/deps/v8/src/execution/arguments-inl.h
@@ -15,6 +15,15 @@ namespace v8 {
namespace internal {
template <ArgumentsType T>
+Arguments<T>::ChangeValueScope::ChangeValueScope(Isolate* isolate,
+ Arguments* args, int index,
+ Object value)
+ : location_(args->address_of_arg_at(index)) {
+ old_value_ = handle(Object(*location_), isolate);
+ *location_ = value.ptr();
+}
+
+template <ArgumentsType T>
int Arguments<T>::smi_at(int index) const {
return Smi::ToInt(Object(*address_of_arg_at(index)));
}
diff --git a/deps/v8/src/execution/arguments.h b/deps/v8/src/execution/arguments.h
index 9ba80a401f..e1cd8d8c5f 100644
--- a/deps/v8/src/execution/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -33,6 +33,18 @@ namespace internal {
template <ArgumentsType arguments_type>
class Arguments {
public:
+ // Scope to temporarily change the value of an argument.
+ class ChangeValueScope {
+ public:
+ inline ChangeValueScope(Isolate* isolate, Arguments* args, int index,
+ Object value);
+ ~ChangeValueScope() { *location_ = old_value_->ptr(); }
+
+ private:
+ Address* location_;
+ Handle<Object> old_value_;
+ };
+
Arguments(int length, Address* arguments)
: length_(length), arguments_(arguments) {
DCHECK_GE(length_, 0);
@@ -51,10 +63,6 @@ class Arguments {
inline double number_at(int index) const;
- inline void set_at(int index, Object value) {
- *address_of_arg_at(index) = value.ptr();
- }
-
inline FullObjectSlot slot_at(int index) const {
return FullObjectSlot(address_of_arg_at(index));
}
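The removed set_at() mutated an argument slot with no way to undo the change; ChangeValueScope replaces it with a save/restore scope whose constructor records the old slot value and installs the new one, and whose destructor writes the old value back when the scope ends. A simplified sketch of that idiom (V8's real version keeps the saved value in a Handle so the GC can still relocate the old object):

#include <cstdint>

using Address = std::uintptr_t;

class ScopedSlotOverride {
 public:
  ScopedSlotOverride(Address* slot, Address new_value)
      : slot_(slot), old_value_(*slot) {
    *slot_ = new_value;  // install the temporary value
  }
  ~ScopedSlotOverride() { *slot_ = old_value_; }  // restore on scope exit

 private:
  Address* slot_;
  Address old_value_;
};

Unlike the removed setter, a caller cannot forget to restore the slot: the override is undone automatically when the scope object is destroyed.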
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 310ddab523..4ebfe6bbd6 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -1835,8 +1835,19 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
#endif
} else {
// builtin call.
+ // FAST_C_CALL is temporarily handled here as well, because we lack
+ // proper support for direct C calls with FP params in the simulator.
+ // The generic BUILTIN_CALL path assumes all parameters are passed in
+ // the GP registers, thus supporting calling the slow callback without
+ // crashing. The reason for that is that in the mjsunit tests we check
+ // the `fast_c_api.supports_fp_params` (which is false on non-simulator
+ // builds for arm/arm64), thus we expect that the slow path will be
+ // called. And since the slow path passes the arguments as a `const
+ // FunctionCallbackInfo<Value>&` (which is a GP argument), the call is
+ // made correctly.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
- redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR ||
+ redirection->type() == ExternalReference::FAST_C_CALL);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p "
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 5669838006..77fd2ffbd3 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -538,6 +538,17 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
TraceSim("Type: Unknown.\n");
UNREACHABLE();
+ // FAST_C_CALL is temporarily handled here as well, because we lack
+ // proper support for direct C calls with FP params in the simulator.
+ // The generic BUILTIN_CALL path assumes all parameters are passed in
+ // the GP registers, thus supporting calling the slow callback without
+ // crashing. The reason for that is that in the mjsunit tests we check
+ // the `fast_c_api.supports_fp_params` (which is false on non-simulator
+ // builds for arm/arm64), thus we expect that the slow path will be
+ // called. And since the slow path passes the arguments as a `const
+ // FunctionCallbackInfo<Value>&` (which is a GP argument), the call is
+ // made correctly.
+ case ExternalReference::FAST_C_CALL:
case ExternalReference::BUILTIN_CALL:
#if defined(V8_OS_WIN)
{
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 689d99057e..4a2095a495 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -13,7 +13,8 @@
#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h" // Only for static asserts.
-#endif // V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-engine.h"
+#endif // V8_ENABLE_WEBASSEMBLY
namespace v8 {
namespace internal {
@@ -252,6 +253,13 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
DCHECK(!params.receiver->IsJSGlobalObject());
DCHECK_LE(params.argc, FixedArray::kMaxLength);
+#if V8_ENABLE_WEBASSEMBLY
+ // If we have PKU support for Wasm, ensure that code is currently write
+ // protected for this thread.
+ DCHECK_IMPLIES(wasm::GetWasmCodeManager()->HasMemoryProtectionKeySupport(),
+ !wasm::GetWasmCodeManager()->MemoryProtectionKeyWritable());
+#endif // V8_ENABLE_WEBASSEMBLY
+
#ifdef USE_SIMULATOR
// Simulators use separate stacks for C++ and JS. JS stack overflow checks
// are performed whenever a JS function is called. However, it can be the case
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 7c28145881..3691f14d86 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -142,7 +142,7 @@ void StackFrameIterator::Reset(ThreadLocalTop* top) {
StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
StackFrame::State* state) {
StackFrame* result = SingletonFor(type);
- DCHECK((!result) == (type == StackFrame::NONE));
+ DCHECK((!result) == (type == StackFrame::NO_FRAME_TYPE));
if (result) result->state_ = *state;
return result;
}
@@ -153,7 +153,7 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
return &field##_;
switch (type) {
- case StackFrame::NONE:
+ case StackFrame::NO_FRAME_TYPE:
return nullptr;
STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
default:
@@ -318,7 +318,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
: StackFrameIteratorBase(isolate, false),
low_bound_(sp),
high_bound_(js_entry_sp),
- top_frame_type_(StackFrame::NONE),
+ top_frame_type_(StackFrame::NO_FRAME_TYPE),
top_context_address_(kNullAddress),
external_callback_scope_(isolate->external_callback_scope()),
top_link_register_(lr) {
@@ -412,7 +412,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
// The frame anyways will be skipped.
type = StackFrame::OPTIMIZED;
// Top frame is incomplete so we cannot reliably determine its type.
- top_frame_type_ = StackFrame::NONE;
+ top_frame_type_ = StackFrame::NO_FRAME_TYPE;
}
} else {
return;
@@ -597,7 +597,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (wasm::WasmCode* wasm_code =
wasm::GetWasmCodeManager()->LookupCode(pc)) {
switch (wasm_code->kind()) {
- case wasm::WasmCode::kFunction:
+ case wasm::WasmCode::kWasmFunction:
return WASM;
case wasm::WasmCode::kWasmToCapiWrapper:
return WASM_EXIT;
@@ -762,7 +762,7 @@ void ExitFrame::Iterate(RootVisitor* v) const {
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
- if (fp == 0) return NONE;
+ if (fp == 0) return NO_FRAME_TYPE;
StackFrame::Type type = ComputeFrameType(fp);
#if V8_ENABLE_WEBASSEMBLY
Address sp = type == WASM_EXIT ? WasmExitFrame::ComputeStackPointer(fp)
@@ -974,7 +974,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
has_tagged_outgoing_params =
- wasm_code->kind() != wasm::WasmCode::kFunction &&
+ wasm_code->kind() != wasm::WasmCode::kWasmFunction &&
wasm_code->kind() != wasm::WasmCode::kWasmToCapiWrapper;
first_tagged_parameter_slot = wasm_code->first_tagged_parameter_slot();
num_tagged_parameter_slots = wasm_code->num_tagged_parameter_slots();
@@ -1059,7 +1059,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// in the place on the stack that one finds the frame type.
UNREACHABLE();
case NATIVE:
- case NONE:
+ case NO_FRAME_TYPE:
case NUMBER_OF_TYPES:
case MANUAL:
UNREACHABLE();
@@ -1297,15 +1297,21 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
if (frame->IsConstructor()) PrintF(file, "new ");
JSFunction function = frame->function();
int code_offset = 0;
+ AbstractCode abstract_code = function.abstract_code(isolate);
if (frame->is_interpreted()) {
InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
code_offset = iframe->GetBytecodeOffset();
+ } else if (frame->is_baseline()) {
+ // TODO(pthier): AbstractCode should fully support Baseline code.
+ BaselineFrame* baseline_frame = BaselineFrame::cast(frame);
+ code_offset = baseline_frame->GetBytecodeOffset();
+ abstract_code = AbstractCode::cast(baseline_frame->GetBytecodeArray());
} else {
Code code = frame->unchecked_code();
code_offset = code.GetOffsetFromInstructionStart(isolate, frame->pc());
}
- PrintFunctionAndOffset(function, function.abstract_code(isolate),
- code_offset, file, print_line_number);
+ PrintFunctionAndOffset(function, abstract_code, code_offset, file,
+ print_line_number);
if (print_args) {
// function arguments
// (we are intentionally only printing the actually
@@ -1387,7 +1393,7 @@ int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
kJavaScriptCallArgCountRegister.code());
Object argc_object(
Memory<Address>(fp() + BuiltinContinuationFrameConstants::kArgCOffset));
- return Smi::ToInt(argc_object);
+ return Smi::ToInt(argc_object) - kJSArgcReceiverSlots;
}
intptr_t JavaScriptBuiltinContinuationFrame::GetSPToFPDelta() const {
@@ -1869,7 +1875,8 @@ JSFunction BuiltinFrame::function() const {
int BuiltinFrame::ComputeParametersCount() const {
const int offset = BuiltinFrameConstants::kLengthOffset;
- return Smi::ToInt(Object(base::Memory<Address>(fp() + offset)));
+ return Smi::ToInt(Object(base::Memory<Address>(fp() + offset))) -
+ kJSArgcReceiverSlots;
}
#if V8_ENABLE_WEBASSEMBLY
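Both ComputeParametersCount() changes above subtract kJSArgcReceiverSlots because the argument count stored in the frame now includes the receiver slot; the JS-visible parameter count is recovered by removing it again. A worked example, assuming kJSArgcReceiverSlots is 1:

constexpr int kJSArgcReceiverSlots = 1;  // assumed value for illustration
constexpr int stored_argc = 3;           // receiver + two explicit arguments, f(a, b)
constexpr int parameters = stored_argc - kJSArgcReceiverSlots;
static_assert(parameters == 2, "f(a, b) has two JS-visible parameters");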
@@ -2170,9 +2177,9 @@ void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->PrintName(scope_info.ContextLocalName(i));
accumulator->Add(" = ");
if (!context.is_null()) {
- int index = Context::MIN_CONTEXT_SLOTS + i;
- if (index < context.length()) {
- accumulator->Add("%o", context.get(index));
+ int slot_index = Context::MIN_CONTEXT_SLOTS + i;
+ if (slot_index < context.length()) {
+ accumulator->Add("%o", context.get(slot_index));
} else {
accumulator->Add(
"// warning: missing context slot - inconsistent frame?");
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index d81a9dd878..04979509a2 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -123,7 +123,7 @@ class StackFrame {
public:
#define DECLARE_TYPE(type, ignore) type,
enum Type {
- NONE = 0,
+ NO_FRAME_TYPE = 0,
STACK_FRAME_TYPE_LIST(DECLARE_TYPE) NUMBER_OF_TYPES,
// Used by FrameScope to indicate that the stack frame is constructed
// manually and the FrameScope does not need to emit code.
diff --git a/deps/v8/src/execution/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index 2206b98c9f..c1120dd8eb 100644
--- a/deps/v8/src/execution/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -531,7 +531,8 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
Handle<JSObject> promise_capability = factory->NewJSPromise();
- enum { kNotEqual, kTimedOut, kAsync } result_kind;
+ enum class ResultKind { kNotEqual, kTimedOut, kAsync };
+ ResultKind result_kind;
{
// 16. Perform EnterCriticalSection(WL).
NoGarbageCollectionMutexGuard lock_guard(g_mutex.Pointer());
@@ -543,11 +544,11 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
std::atomic<T>* p = reinterpret_cast<std::atomic<T>*>(
static_cast<int8_t*>(backing_store->buffer_start()) + addr);
if (p->load() != value) {
- result_kind = kNotEqual;
+ result_kind = ResultKind::kNotEqual;
} else if (use_timeout && rel_timeout_ns == 0) {
- result_kind = kTimedOut;
+ result_kind = ResultKind::kTimedOut;
} else {
- result_kind = kAsync;
+ result_kind = ResultKind::kAsync;
FutexWaitListNode* node = new FutexWaitListNode(
backing_store, addr, promise_capability, isolate);
@@ -571,7 +572,7 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
}
switch (result_kind) {
- case kNotEqual:
+ case ResultKind::kNotEqual:
// 18. If v is not equal to w, then
// ...
// c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
@@ -588,7 +589,7 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
.FromJust());
break;
- case kTimedOut:
+ case ResultKind::kTimedOut:
// 19. If t is 0 and mode is async, then
// ...
// c. Perform ! CreateDataPropertyOrThrow(resultObject, "async", false).
@@ -605,7 +606,7 @@ Object FutexEmulation::WaitAsync(Isolate* isolate,
.FromJust());
break;
- case kAsync:
+ case ResultKind::kAsync:
// Add the Promise into the NativeContext's atomics_waitasync_promises
// set, so that the list keeps it alive.
Handle<NativeContext> native_context(isolate->native_context());
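Turning the anonymous result enum into an enum class keeps kNotEqual/kTimedOut/kAsync out of the enclosing function scope and removes their implicit conversion to int, which is why every switch label above gains the ResultKind:: prefix. A stand-alone illustration of the difference (stand-in names, not V8 code):

enum Plain { kFoo, kBar };         // enumerators leak into this scope
enum class Scoped { kFoo, kBar };  // must be written Scoped::kFoo

int Demo() {
  int x = kFoo;                    // OK: unscoped enumerators convert to int
  Scoped s = Scoped::kFoo;         // qualified, no implicit conversion
  // int y = Scoped::kBar;         // would not compile without a cast
  return x + static_cast<int>(s);
}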
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index b1b85e5bab..801cb8b132 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -52,6 +52,7 @@
#include "src/execution/simulator.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
@@ -74,6 +75,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/prototype.h"
@@ -682,7 +684,7 @@ class StackTraceBuilder {
#if V8_ENABLE_WEBASSEMBLY
void AppendWasmFrame(FrameSummary::WasmFrameSummary const& summary) {
- if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
+ if (summary.code()->kind() != wasm::WasmCode::kWasmFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = StackFrameInfo::kIsWasm;
if (instance->module_object().is_asm_js()) {
@@ -830,6 +832,7 @@ class StackTraceBuilder {
};
bool GetStackTraceLimit(Isolate* isolate, int* result) {
+ DCHECK(!FLAG_correctness_fuzzer_suppressions);
Handle<JSObject> error = isolate->error_function();
Handle<String> key = isolate->factory()->stackTraceLimit_string();
@@ -876,7 +879,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
Builtin::kAsyncGeneratorAwaitResolveClosure) ||
IsBuiltinFunction(isolate, reaction->fulfill_handler(),
Builtin::kAsyncGeneratorYieldResolveClosure)) {
- // Now peak into the handlers' AwaitContext to get to
+ // Now peek into the handlers' AwaitContext to get to
// the JSGeneratorObject for the async function.
Handle<Context> context(
JSFunction::cast(reaction->fulfill_handler()).context(), isolate);
@@ -1074,7 +1077,7 @@ Handle<FixedArray> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
Builtin::kAsyncFunctionAwaitRejectClosure) ||
IsBuiltinFunction(isolate, promise_reaction_job_task->handler(),
Builtin::kAsyncGeneratorAwaitRejectClosure)) {
- // Now peak into the handlers' AwaitContext to get to
+ // Now peek into the handlers' AwaitContext to get to
// the JSGeneratorObject for the async function.
Handle<Context> context(
JSFunction::cast(promise_reaction_job_task->handler()).context(),
@@ -1129,7 +1132,10 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
FrameSkipMode mode,
Handle<Object> caller) {
int limit;
- if (!GetStackTraceLimit(this, &limit)) return factory()->undefined_value();
+ if (FLAG_correctness_fuzzer_suppressions ||
+ !GetStackTraceLimit(this, &limit)) {
+ return factory()->undefined_value();
+ }
CaptureStackTraceOptions options;
options.limit = limit;
@@ -1527,7 +1533,7 @@ void ReportBootstrappingException(Handle<Object> exception,
PrintF(" <not available>\n");
} else {
PrintF("\n");
- int line_number = 1;
+ line_number = 1;
PrintF("%5d: ", line_number);
for (int i = 0; i < len; i++) {
uint16_t character = src->Get(i);
@@ -1564,7 +1570,9 @@ Handle<JSMessageObject> Isolate::CreateMessageOrAbort(
// print a user-friendly stack trace (not an internal one).
PrintF(stderr, "%s\n\nFROM\n",
MessageHandler::GetLocalizedMessage(this, message_obj).get());
- PrintCurrentStackTrace(stderr);
+ std::ostringstream stack_trace_stream;
+ PrintCurrentStackTrace(stack_trace_stream);
+ PrintF(stderr, "%s", stack_trace_stream.str().c_str());
base::OS::Abort();
}
}
@@ -2125,7 +2133,7 @@ Object Isolate::PromoteScheduledException() {
return ReThrow(thrown);
}
-void Isolate::PrintCurrentStackTrace(FILE* out) {
+void Isolate::PrintCurrentStackTrace(std::ostream& out) {
CaptureStackTraceOptions options;
options.limit = 0;
options.skip_mode = SKIP_NONE;
@@ -2441,8 +2449,7 @@ bool PromiseHasUserDefinedRejectHandlerInternal(Isolate* isolate,
Handle<PromiseCapability>::cast(promise_or_capability)->promise(),
isolate);
}
- Handle<JSPromise> promise =
- Handle<JSPromise>::cast(promise_or_capability);
+ promise = Handle<JSPromise>::cast(promise_or_capability);
if (!reaction->reject_handler().IsUndefined(isolate)) {
Handle<JSReceiver> reject_handler(
JSReceiver::cast(reaction->reject_handler()), isolate);
@@ -2614,6 +2621,20 @@ bool Isolate::AreWasmExceptionsEnabled(Handle<Context> context) {
#endif // V8_ENABLE_WEBASSEMBLY
}
+bool Isolate::IsWasmDynamicTieringEnabled() {
+#if V8_ENABLE_WEBASSEMBLY
+ if (wasm_dynamic_tiering_enabled_callback()) {
+ HandleScope handle_scope(this);
+ v8::Local<v8::Context> api_context =
+ v8::Utils::ToLocal(handle(context(), this));
+ return wasm_dynamic_tiering_enabled_callback()(api_context);
+ }
+ return FLAG_wasm_dynamic_tiering;
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
Handle<Context> Isolate::GetIncumbentContext() {
JavaScriptFrameIterator it(this);
@@ -3119,8 +3140,6 @@ void Isolate::Deinit() {
// All client isolates should already be detached.
DCHECK_NULL(client_isolate_head_);
- DumpAndResetStats();
-
if (FLAG_print_deopt_stress) {
PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
}
@@ -3136,6 +3155,11 @@ void Isolate::Deinit() {
// not cause a GC.
heap_.StartTearDown();
+ // This stops cancelable tasks (i.e. concurrent marking tasks).
+ // Stop concurrent tasks before destroying resources since they might still
+ // use those.
+ cancelable_task_manager()->CancelAndWait();
+
ReleaseSharedPtrs();
string_table_.reset();
@@ -3157,8 +3181,9 @@ void Isolate::Deinit() {
delete baseline_batch_compiler_;
baseline_batch_compiler_ = nullptr;
- // This stops cancelable tasks (i.e. concurrent marking tasks)
- cancelable_task_manager()->CancelAndWait();
+ // After all concurrent tasks are stopped, we know for sure that stats aren't
+ // updated anymore.
+ DumpAndResetStats();
main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
@@ -3598,7 +3623,6 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
store_stub_cache_ = new StubCache(this);
materialized_object_store_ = new MaterializedObjectStore(this);
regexp_stack_ = new RegExpStack();
- regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
@@ -3728,7 +3752,6 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// If we are deserializing, read the state into the now-empty heap.
{
- AlwaysAllocateScope always_allocate(heap());
CodeSpaceMemoryModificationScope modification_scope(heap());
if (create_heap_objects) {
@@ -4553,7 +4576,7 @@ void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
debug::kDebugDidHandle, promise->async_task_id(), false);
break;
case PromiseHookType::kInit:
- debug::DebugAsyncActionType type = debug::kDebugPromiseThen;
+ debug::DebugAsyncActionType action_type = debug::kDebugPromiseThen;
bool last_frame_was_promise_builtin = false;
JavaScriptFrameIterator it(this);
while (!it.done()) {
@@ -4569,21 +4592,22 @@ void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
promise->set_async_task_id(++async_task_count_);
}
async_event_delegate_->AsyncEventOccurred(
- type, promise->async_task_id(), debug()->IsBlackboxed(info));
+ action_type, promise->async_task_id(),
+ debug()->IsBlackboxed(info));
}
return;
}
last_frame_was_promise_builtin = false;
if (info->HasBuiltinId()) {
if (info->builtin_id() == Builtin::kPromisePrototypeThen) {
- type = debug::kDebugPromiseThen;
+ action_type = debug::kDebugPromiseThen;
last_frame_was_promise_builtin = true;
} else if (info->builtin_id() == Builtin::kPromisePrototypeCatch) {
- type = debug::kDebugPromiseCatch;
+ action_type = debug::kDebugPromiseCatch;
last_frame_was_promise_builtin = true;
} else if (info->builtin_id() ==
Builtin::kPromisePrototypeFinally) {
- type = debug::kDebugPromiseFinally;
+ action_type = debug::kDebugPromiseFinally;
last_frame_was_promise_builtin = true;
}
}
@@ -4707,6 +4731,24 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
}
+void Isolate::DetachGlobal(Handle<Context> env) {
+ counters()->errors_thrown_per_context()->AddSample(
+ env->native_context().GetErrorsThrown());
+
+ ReadOnlyRoots roots(this);
+ Handle<JSGlobalProxy> global_proxy(env->global_proxy(), this);
+ global_proxy->set_native_context(roots.null_value());
+ // NOTE: Turbofan's JSNativeContextSpecialization depends on DetachGlobal
+ // causing a map change.
+ JSObject::ForceSetPrototype(this, global_proxy, factory()->null_value());
+ global_proxy->map().set_constructor_or_back_pointer(roots.null_value(),
+ kRelaxedStore);
+ if (FLAG_track_detached_contexts) AddDetachedContext(env);
+ DCHECK(global_proxy->IsDetached());
+
+ env->native_context().set_microtask_queue(this, nullptr);
+}
+
double Isolate::LoadStartTimeMs() {
base::MutexGuard guard(&rail_mutex_);
return load_start_time_ms_;
@@ -4784,48 +4826,47 @@ void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
}
#ifdef V8_INTL_SUPPORT
+
namespace {
-std::string GetStringFromLocale(Handle<Object> locales_obj) {
- DCHECK(locales_obj->IsString() || locales_obj->IsUndefined());
- if (locales_obj->IsString()) {
- return std::string(String::cast(*locales_obj).ToCString().get());
- }
- return "";
+std::string GetStringFromLocales(Isolate* isolate, Handle<Object> locales) {
+ if (locales->IsUndefined(isolate)) return "";
+ return std::string(String::cast(*locales).ToCString().get());
}
-} // namespace
-icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type,
- Handle<Object> locales_obj) {
- std::string locale = GetStringFromLocale(locales_obj);
- auto value = icu_object_cache_.find(cache_type);
- if (value == icu_object_cache_.end()) return nullptr;
+bool StringEqualsLocales(Isolate* isolate, const std::string& str,
+ Handle<Object> locales) {
+ if (locales->IsUndefined(isolate)) return str == "";
+ return Handle<String>::cast(locales)->IsEqualTo(
+ base::VectorOf(str.c_str(), str.length()));
+}
- ICUCachePair pair = value->second;
- if (pair.first != locale) return nullptr;
+} // namespace
- return pair.second.get();
+icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type,
+ Handle<Object> locales) {
+ const ICUObjectCacheEntry& entry =
+ icu_object_cache_[static_cast<int>(cache_type)];
+ return StringEqualsLocales(this, entry.locales, locales) ? entry.obj.get()
+ : nullptr;
}
-void Isolate::set_icu_object_in_cache(
- ICUObjectCacheType cache_type, Handle<Object> locales_obj,
- std::shared_ptr<icu::UMemory> icu_formatter) {
- std::string locale = GetStringFromLocale(locales_obj);
- ICUCachePair pair = std::make_pair(locale, icu_formatter);
-
- auto it = icu_object_cache_.find(cache_type);
- if (it == icu_object_cache_.end()) {
- icu_object_cache_.insert({cache_type, pair});
- } else {
- it->second = pair;
- }
+void Isolate::set_icu_object_in_cache(ICUObjectCacheType cache_type,
+ Handle<Object> locales,
+ std::shared_ptr<icu::UMemory> obj) {
+ icu_object_cache_[static_cast<int>(cache_type)] = {
+ GetStringFromLocales(this, locales), std::move(obj)};
}
void Isolate::clear_cached_icu_object(ICUObjectCacheType cache_type) {
- icu_object_cache_.erase(cache_type);
+ icu_object_cache_[static_cast<int>(cache_type)] = ICUObjectCacheEntry{};
}
-void Isolate::ClearCachedIcuObjects() { icu_object_cache_.clear(); }
+void Isolate::clear_cached_icu_objects() {
+ for (int i = 0; i < kICUObjectCacheTypeCount; i++) {
+ clear_cached_icu_object(static_cast<ICUObjectCacheType>(i));
+ }
+}
#endif // V8_INTL_SUPPORT
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 6233d56906..2edc34a3e6 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -447,6 +447,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr) \
V(WasmExceptionsEnabledCallback, wasm_exceptions_enabled_callback, nullptr) \
+ V(WasmDynamicTieringEnabledCallback, wasm_dynamic_tiering_enabled_callback, \
+ nullptr) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
@@ -715,6 +717,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsWasmSimdEnabled(Handle<Context> context);
bool AreWasmExceptionsEnabled(Handle<Context> context);
+ bool IsWasmDynamicTieringEnabled();
THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
@@ -853,7 +856,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::Isolate::AbortOnUncaughtExceptionCallback callback);
enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
- void PrintCurrentStackTrace(FILE* out);
+ void PrintCurrentStackTrace(std::ostream& out);
void PrintStack(StringStream* accumulator,
PrintStackMode mode = kPrintStackVerbose);
void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
@@ -1076,6 +1079,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return isolate_data()->cage_base();
}
+ Address code_cage_base() const { return cage_base(); }
+
// When pointer compression is on, the PtrComprCage used by this
// Isolate. Otherwise nullptr.
VirtualMemoryCage* GetPtrComprCage() {
@@ -1350,18 +1355,18 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
default_locale_ = locale;
}
- // enum to access the icu object cache.
enum class ICUObjectCacheType{
kDefaultCollator, kDefaultNumberFormat, kDefaultSimpleDateFormat,
kDefaultSimpleDateFormatForTime, kDefaultSimpleDateFormatForDate};
+ static constexpr int kICUObjectCacheTypeCount = 5;
icu::UMemory* get_cached_icu_object(ICUObjectCacheType cache_type,
Handle<Object> locales);
void set_icu_object_in_cache(ICUObjectCacheType cache_type,
- Handle<Object> locale,
+ Handle<Object> locales,
std::shared_ptr<icu::UMemory> obj);
void clear_cached_icu_object(ICUObjectCacheType cache_type);
- void ClearCachedIcuObjects();
+ void clear_cached_icu_objects();
#endif // V8_INTL_SUPPORT
@@ -1560,6 +1565,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
+ // Detach the environment from its outer global object.
+ void DetachGlobal(Handle<Context> env);
+
std::vector<Object>* startup_object_cache() { return &startup_object_cache_; }
bool IsGeneratingEmbeddedBuiltins() const {
@@ -2033,14 +2041,18 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
#ifdef V8_INTL_SUPPORT
std::string default_locale_;
- struct ICUObjectCacheTypeHash {
- std::size_t operator()(ICUObjectCacheType a) const {
- return static_cast<std::size_t>(a);
- }
+ // The cache stores the most recently accessed {locales,obj} pair for each
+ // cache type.
+ struct ICUObjectCacheEntry {
+ std::string locales;
+ std::shared_ptr<icu::UMemory> obj;
+
+ ICUObjectCacheEntry() = default;
+ ICUObjectCacheEntry(std::string locales, std::shared_ptr<icu::UMemory> obj)
+ : locales(locales), obj(std::move(obj)) {}
};
- typedef std::pair<std::string, std::shared_ptr<icu::UMemory>> ICUCachePair;
- std::unordered_map<ICUObjectCacheType, ICUCachePair, ICUObjectCacheTypeHash>
- icu_object_cache_;
+
+ ICUObjectCacheEntry icu_object_cache_[kICUObjectCacheTypeCount];
#endif // V8_INTL_SUPPORT
// true if being profiled. Causes collection of extra compile info.
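The ICU object cache moves from an unordered_map keyed by ICUObjectCacheType to a plain array with one slot per cache type; each slot remembers only the most recently used locales string and its ICU object, so a lookup is an array index plus a string comparison. A simplified sketch of that single-entry-per-type cache shape (the types stand in for icu::UMemory and the real enum):

#include <memory>
#include <string>

enum class CacheType { kCollator, kNumberFormat, kCount };

struct Entry {
  std::string key;               // the locales string the object was built for
  std::shared_ptr<void> object;  // the cached formatter/collator
};

class MostRecentCache {
 public:
  void* Get(CacheType type, const std::string& key) {
    Entry& e = entries_[static_cast<int>(type)];
    return e.key == key ? e.object.get() : nullptr;  // miss when locales differ
  }
  void Put(CacheType type, std::string key, std::shared_ptr<void> object) {
    entries_[static_cast<int>(type)] = {std::move(key), std::move(object)};
  }

 private:
  Entry entries_[static_cast<int>(CacheType::kCount)];
};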
diff --git a/deps/v8/src/execution/local-isolate-inl.h b/deps/v8/src/execution/local-isolate-inl.h
index ca7c119b6b..80baf1ab0e 100644
--- a/deps/v8/src/execution/local-isolate-inl.h
+++ b/deps/v8/src/execution/local-isolate-inl.h
@@ -13,6 +13,11 @@ namespace v8 {
namespace internal {
Address LocalIsolate::cage_base() const { return isolate_->cage_base(); }
+
+Address LocalIsolate::code_cage_base() const {
+ return isolate_->code_cage_base();
+}
+
ReadOnlyHeap* LocalIsolate::read_only_heap() const {
return isolate_->read_only_heap();
}
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index 55891f87c5..82a715dfeb 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -58,6 +58,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
LocalHeap* heap() { return &heap_; }
inline Address cage_base() const;
+ inline Address code_cage_base() const;
inline ReadOnlyHeap* read_only_heap() const;
inline Object root(RootIndex index) const;
inline Handle<Object> root_handle(RootIndex index) const;
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index 2628e7a673..10d89ca14e 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -821,11 +821,11 @@ MessageTemplate UpdateErrorTemplate(CallPrinter::ErrorHint hint,
Handle<JSObject> ErrorUtils::NewIteratorError(Isolate* isolate,
Handle<Object> source) {
MessageLocation location;
- CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ CallPrinter::ErrorHint hint = CallPrinter::ErrorHint::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
MessageTemplate id = MessageTemplate::kNotIterableNoSymbolLoad;
- if (hint == CallPrinter::kNone) {
+ if (hint == CallPrinter::ErrorHint::kNone) {
Handle<Symbol> iterator_symbol = isolate->factory()->iterator_symbol();
return isolate->factory()->NewTypeError(id, callsite, iterator_symbol);
}
@@ -871,7 +871,7 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
Handle<JSObject> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
Handle<Object> source) {
MessageLocation location;
- CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ CallPrinter::ErrorHint hint = CallPrinter::ErrorHint::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
MessageTemplate id = MessageTemplate::kCalledNonCallable;
id = UpdateErrorTemplate(hint, id);
@@ -881,7 +881,7 @@ Handle<JSObject> ErrorUtils::NewCalledNonCallableError(Isolate* isolate,
Handle<JSObject> ErrorUtils::NewConstructedNonConstructable(
Isolate* isolate, Handle<Object> source) {
MessageLocation location;
- CallPrinter::ErrorHint hint = CallPrinter::kNone;
+ CallPrinter::ErrorHint hint = CallPrinter::ErrorHint::kNone;
Handle<String> callsite = RenderCallSite(isolate, source, &location, &hint);
MessageTemplate id = MessageTemplate::kNotConstructor;
return isolate->factory()->NewTypeError(id, callsite);
@@ -974,7 +974,6 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
callsite, object);
}
} else {
- Handle<Object> key_handle;
if (!key.ToHandle(&key_handle) ||
!maybe_property_name.ToHandle(&property_name)) {
error = isolate->factory()->NewTypeError(
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index f6ee75e809..d9dc7813ee 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -1193,7 +1193,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_register(r3, result_buffer);
}
} else {
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ // FAST_C_CALL is temporarily handled here as well, because we lack
+ // proper support for direct C calls with FP params in the simulator.
+ // The generic BUILTIN_CALL path assumes all parameters are passed in
+ // the GP registers, thus supporting calling the slow callback without
+ // crashing. The reason for that is that in the mjsunit tests we check
+ // the `fast_c_api.supports_fp_params` (which is false on
+ // non-simulator builds for arm/arm64), thus we expect that the slow
+ // path will be called. And since the slow path passes the arguments
+ // as a `const FunctionCallbackInfo<Value>&` (which is a GP argument),
+ // the call is made correctly.
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::FAST_C_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
@@ -3640,8 +3651,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
? kRoundToZero
: (fp_condition_reg_ & kFPRoundingModeMask);
uint64_t frt_val;
- uint64_t kMinVal = 0;
- uint64_t kMaxVal = kMinVal - 1;
+ uint64_t kMinVal = kMinUInt32;
+ uint64_t kMaxVal = kMaxUInt32;
bool invalid_convert = false;
if (std::isnan(frb_val)) {
@@ -3688,7 +3699,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int fra = instr->RAValue();
double frb_val = get_double_from_d_register(frb);
double fra_val = get_double_from_d_register(fra);
- double frt_val = std::copysign(fra_val, frb_val);
+ double frt_val = std::copysign(frb_val, fra_val);
set_d_register_from_double(frt, frt_val);
return;
}
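The fcpsgn fix above swaps the copysign operands: std::copysign(x, y) returns the magnitude of x with the sign of y, so copysign(frb_val, fra_val) takes the magnitude from FRB and the sign from FRA, matching fcpsgn's definition. A small check of the argument order:

#include <cassert>
#include <cmath>

int main() {
  // std::copysign(x, y) returns the magnitude of x with the sign of y.
  assert(std::copysign(3.0, -0.5) == -3.0);
  assert(std::copysign(-3.0, 2.0) == 3.0);
  // Hence copysign(frb_val, fra_val) combines FRB's magnitude with FRA's
  // sign, which is what the corrected line above computes.
  return 0;
}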
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
index 1b72aa9862..4d289c4d20 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -356,6 +356,7 @@
#define RVV_VI_LOOP_CMP_END \
vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
} \
+ rvv_trace_vd(); \
set_rvv_vstart(0);
// comparison result to masking register
@@ -374,8 +375,7 @@
VV_CMP_PARAMS(64); \
BODY; \
} \
- RVV_VI_LOOP_CMP_END \
- rvv_trace_vd();
+ RVV_VI_LOOP_CMP_END
#define RVV_VI_VX_LOOP_CMP(BODY) \
RVV_VI_LOOP_CMP_BASE \
@@ -462,6 +462,116 @@
} \
RVV_VI_LOOP_CMP_END
+#define RVV_VI_VFP_LOOP_BASE \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP();
+
+#define RVV_VI_VFP_LOOP_END \
+ } \
+ set_rvv_vstart(0);
+
+#define RVV_VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \
+ RVV_VI_VFP_LOOP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: { \
+ UNIMPLEMENTED(); \
+ } \
+ case E32: { \
+ float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
+ float fs1 = static_cast<float>(get_fpu_register(rs1_reg())); \
+ float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
+ BODY32; \
+ break; \
+ } \
+ case E64: { \
+ double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
+ double fs1 = static_cast<double>(get_fpu_register(rs1_reg())); \
+ double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
+ BODY64; \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \
+ RVV_VI_VFP_LOOP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: { \
+ UNIMPLEMENTED(); \
+ break; \
+ } \
+ case E32: { \
+ float& vd = Rvvelt<float>(rvv_vd_reg(), i, true); \
+ float vs1 = Rvvelt<float>(rvv_vs1_reg(), i); \
+ float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
+ BODY32; \
+ break; \
+ } \
+ case E64: { \
+ double& vd = Rvvelt<double>(rvv_vd_reg(), i, true); \
+ double vs1 = Rvvelt<double>(rvv_vs1_reg(), i); \
+ double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
+ BODY64; \
+ break; \
+ } \
+ default: \
+ require(0); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_LOOP_CMP_BASE \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ uint64_t mmask = uint64_t(1) << mpos; \
+ uint64_t& vdi = Rvvelt<uint64_t>(rvv_vd_reg(), midx, true); \
+ uint64_t res = 0;
+
+#define RVV_VI_VFP_LOOP_CMP_END \
+ switch (rvv_vsew()) { \
+ case E16: \
+ case E32: \
+ case E64: { \
+ vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ rvv_trace_vd();
+
+#define RVV_VI_VFP_LOOP_CMP(BODY16, BODY32, BODY64, is_vs1) \
+ RVV_VI_VFP_LOOP_CMP_BASE \
+ switch (rvv_vsew()) { \
+ case E16: { \
+ UNIMPLEMENTED(); \
+ } \
+ case E32: { \
+ float vs2 = Rvvelt<float>(rvv_vs2_reg(), i); \
+ float vs1 = Rvvelt<float>(rvv_vs1_reg(), i); \
+ BODY32; \
+ break; \
+ } \
+ case E64: { \
+ double vs2 = Rvvelt<double>(rvv_vs2_reg(), i); \
+ double vs1 = Rvvelt<double>(rvv_vs1_reg(), i); \
+ BODY64; \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ } \
+ RVV_VI_VFP_LOOP_CMP_END
+
// reduction loop - signed
#define RVV_VI_LOOP_REDUCTION_BASE(x) \
auto& vd_0_des = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), 0, true); \
@@ -537,7 +647,7 @@
#define VI_CHECK_STORE(elt_width, is_mask_ldst) \
reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8;
-// float vemul = is_mask_ldst ? 1 : ((float)veew / rvv_vsew() * P.VU.vflmul);
+// float vemul = is_mask_ldst ? 1 : ((float)veew / rvv_vsew() * Rvvvflmul);
// reg_t emul = vemul < 1 ? 1 : vemul;
// require(vemul >= 0.125 && vemul <= 8);
// require_align(rvv_rd(), vemul);
@@ -598,6 +708,40 @@
*reinterpret_cast<int64_t*>(&value), \
(uint64_t)(get_register(rs1_reg()))); \
}
+
+#define VI_VFP_LOOP_SCALE_BASE \
+ /*require(STATE.frm < 0x5);*/ \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP();
+
+#define RVV_VI_VFP_CVT_SCALE(BODY8, BODY16, BODY32, CHECK8, CHECK16, CHECK32, \
+ is_widen, eew_check) \
+ CHECK(eew_check); \
+ switch (rvv_vsew()) { \
+ case E8: { \
+ CHECK8 \
+ VI_VFP_LOOP_SCALE_BASE \
+ BODY8 /*set_fp_exceptions*/; \
+ RVV_VI_VFP_LOOP_END \
+ } break; \
+ case E16: { \
+ CHECK16 \
+ VI_VFP_LOOP_SCALE_BASE \
+ BODY16 /*set_fp_exceptions*/; \
+ RVV_VI_VFP_LOOP_END \
+ } break; \
+ case E32: { \
+ CHECK32 \
+ VI_VFP_LOOP_SCALE_BASE \
+ BODY32 /*set_fp_exceptions*/; \
+ RVV_VI_VFP_LOOP_END \
+ } break; \
+ default: \
+ require(0); \
+ break; \
+ } \
+ rvv_trace_vd();
+
namespace v8 {
namespace internal {
@@ -2599,7 +2743,17 @@ bool Simulator::CompareFHelper(T input1, T input2, FPUCondition cc) {
result = (input1 == input2);
}
break;
-
+ case NE:
+ if (std::numeric_limits<T>::signaling_NaN() == input1 ||
+ std::numeric_limits<T>::signaling_NaN() == input2) {
+ set_fflags(kInvalidOperation);
+ }
+ if (std::isnan(input1) || std::isnan(input2)) {
+ result = true;
+ } else {
+ result = (input1 != input2);
+ }
+ break;
default:
UNREACHABLE();
}
@@ -4162,6 +4316,12 @@ void Simulator::DecodeRvvIVV() {
RVV_VI_LOOP_END
break;
}
+ case RO_V_VSADDU_VV:
+ RVV_VI_VV_ULOOP({
+ // Unsigned saturating add: if the sum wrapped around (vd < vs2),
+ // OR in an all-ones mask so the result clamps to the type maximum.
+ vd = vs2 + vs1;
+ vd |= -(vd < vs2);
+ })
+ break;
case RO_V_VSUB_VV: {
RVV_VI_VV_LOOP({ vd = vs2 - vs1; })
break;
@@ -4305,6 +4465,8 @@ void Simulator::DecodeRvvIVV() {
}
case RO_V_VRGATHER_VV: {
RVV_VI_GENERAL_LOOP_BASE
+ CHECK_NE(rvv_vs1_reg(), rvv_vd_reg());
+ CHECK_NE(rvv_vs2_reg(), rvv_vd_reg());
switch (rvv_vsew()) {
case E8: {
auto vs1 = Rvvelt<uint8_t>(rvv_vs1_reg(), i);
@@ -4387,6 +4549,13 @@ void Simulator::DecodeRvvIVI() {
RVV_VI_LOOP_END
break;
}
+ case RO_V_VSADDU_VI: {
+ RVV_VI_VI_ULOOP({
+ vd = vs2 + uimm5;
+ vd |= -(vd < vs2);
+ })
+ break;
+ }
case RO_V_VRSUB_VI: {
RVV_VI_VI_LOOP({ vd = vs2 - simm5; })
break;
@@ -4525,6 +4694,13 @@ void Simulator::DecodeRvvIVX() {
RVV_VI_LOOP_END
break;
}
+ case RO_V_VSADDU_VX: {
+ RVV_VI_VX_ULOOP({
+ vd = vs2 + rs1;
+ vd |= -(vd < vs2);
+ })
+ break;
+ }
case RO_V_VSUB_VX: {
RVV_VI_VX_LOOP({ vd = vs2 - rs1; })
break;
@@ -4673,6 +4849,10 @@ void Simulator::DecodeRvvIVX() {
RVV_VI_VX_LOOP({ vd = vs2 << rs1; })
break;
}
+ case RO_V_VSRL_VX: {
+ RVV_VI_VX_LOOP({ vd = int32_t(uint32_t(vs2) >> (rs1 & (xlen - 1))); })
+ break;
+ }
default:
UNIMPLEMENTED_RISCV();
break;
@@ -4786,13 +4966,380 @@ void Simulator::DecodeRvvMVX() {
}
}
+void Simulator::DecodeRvvFVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VFDIV_VV: {
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](float vs1, float vs2) {
+ if (is_invalid_fdiv(vs1, vs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else if (vs2 == 0.0f) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(vs1) == std::signbit(vs2)
+ ? std::numeric_limits<float>::infinity()
+ : -std::numeric_limits<float>::infinity());
+ } else {
+ return vs1 / vs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+ vd = alu_out;
+ },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double vs1, double vs2) {
+ if (is_invalid_fdiv(vs1, vs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else if (vs2 == 0.0f) {
+ this->set_fflags(kDivideByZero);
+ return (std::signbit(vs1) == std::signbit(vs2)
+ ? std::numeric_limits<double>::infinity()
+ : -std::numeric_limits<double>::infinity());
+ } else {
+ return vs1 / vs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ }
+ case RO_V_VFMUL_VV: {
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fmul(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 * drs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+ vd = alu_out;
+ },
+ {
+ // TODO(riscv): use rm value (round mode)
+ auto fn = [this](double drs1, double drs2) {
+ if (is_invalid_fmul(drs1, drs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return drs1 * drs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ }
+ case RO_V_VFUNARY0:
+ switch (instr_.Vs1Value()) {
+ case VFCVT_X_F_V:
+ RVV_VI_VFP_VF_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ Rvvelt<int32_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<int32_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ },
+ {
+ Rvvelt<int64_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<int64_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ })
+ break;
+ case VFCVT_XU_F_V:
+ RVV_VI_VFP_VF_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ Rvvelt<uint32_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<uint32_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ },
+ {
+ Rvvelt<uint64_t>(rvv_vd_reg(), i) =
+ RoundF2IHelper<uint64_t>(vs2, read_csr_value(csr_frm));
+ USE(vd);
+ USE(fs1);
+ })
+ break;
+ case VFCVT_F_XU_V:
+ RVV_VI_VFP_VF_LOOP({ UNIMPLEMENTED(); },
+ {
+ auto vs2_i = Rvvelt<uint32_t>(rvv_vs2_reg(), i);
+ vd = static_cast<float>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ },
+ {
+ auto vs2_i = Rvvelt<uint64_t>(rvv_vs2_reg(), i);
+ vd = static_cast<double>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ })
+ break;
+ case VFCVT_F_X_V:
+ RVV_VI_VFP_VF_LOOP({ UNIMPLEMENTED(); },
+ {
+ auto vs2_i = Rvvelt<int32_t>(rvv_vs2_reg(), i);
+ vd = static_cast<float>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ },
+ {
+ auto vs2_i = Rvvelt<int64_t>(rvv_vs2_reg(), i);
+ vd = static_cast<double>(vs2_i);
+ USE(vs2);
+ USE(fs1);
+ })
+ break;
+ case VFNCVT_F_F_W:
+ RVV_VI_VFP_CVT_SCALE(
+ { UNREACHABLE(); }, { UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<double>(rvv_vs2_reg(), i);
+ Rvvelt<float>(rvv_vd_reg(), i, true) =
+ CanonicalizeDoubleToFloatOperation(
+ [](double drs) { return static_cast<float>(drs); },
+ vs2);
+ },
+ { ; }, { ; }, { ; }, false, (rvv_vsew() >= E16))
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+ break;
+ case RO_V_VFUNARY1:
+ switch (instr_.Vs1Value()) {
+ case VFCLASS_V:
+ RVV_VI_VFP_VF_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ int32_t& vd_i = Rvvelt<int32_t>(rvv_vd_reg(), i, true);
+ vd_i = int32_t(FclassHelper(vs2));
+ USE(fs1);
+ USE(vd);
+ },
+ {
+ int64_t& vd_i = Rvvelt<int64_t>(rvv_vd_reg(), i, true);
+ vd_i = FclassHelper(vs2);
+ USE(fs1);
+ USE(vd);
+ })
+ break;
+ default:
+ break;
+ }
+ break;
+ case RO_V_VMFEQ_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, EQ); },
+ { res = CompareFHelper(vs1, vs2, EQ); }, true)
+ } break;
+ case RO_V_VMFNE_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, NE); },
+ { res = CompareFHelper(vs1, vs2, NE); }, true)
+ } break;
+ case RO_V_VMFLT_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, LT); },
+ { res = CompareFHelper(vs1, vs2, LT); }, true)
+ } break;
+ case RO_V_VMFLE_VV: {
+ RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
+ { res = CompareFHelper(vs1, vs2, LE); },
+ { res = CompareFHelper(vs1, vs2, LE); }, true)
+ } break;
+ case RO_V_VFMAX_VV: {
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMax); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMax); })
+ break;
+ }
+ case RO_V_VFMIN_VV: {
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMin); },
+ { vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMin); })
+ break;
+ }
+ case RO_V_VFSGNJ_VV:
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = fsgnj32(vs2, vs1, false, false); },
+ { vd = fsgnj64(vs2, vs1, false, false); })
+ break;
+ case RO_V_VFSGNJN_VV:
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = fsgnj32(vs2, vs1, true, false); },
+ { vd = fsgnj64(vs2, vs1, true, false); })
+ break;
+ case RO_V_VFSGNJX_VV:
+ RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
+ { vd = fsgnj32(vs2, vs1, false, true); },
+ { vd = fsgnj64(vs2, vs1, false, true); })
+ break;
+ case RO_V_VFADD_VV:
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+ vd = alu_out;
+ },
+ {
+ auto fn = [this](double frs1, double frs2) {
+ if (is_invalid_fadd(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return frs1 + frs2;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ case RO_V_VFSUB_VV:
+ RVV_VI_VFP_VV_LOOP(
+ { UNIMPLEMENTED(); },
+ {
+ auto fn = [this](float frs1, float frs2) {
+ if (is_invalid_fsub(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<float>::quiet_NaN();
+ } else {
+ return frs2 - frs1;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ }
+
+ vd = alu_out;
+ },
+ {
+ auto fn = [this](double frs1, double frs2) {
+ if (is_invalid_fsub(frs1, frs2)) {
+ this->set_fflags(kInvalidOperation);
+ return std::numeric_limits<double>::quiet_NaN();
+ } else {
+ return frs2 - frs1;
+ }
+ };
+ auto alu_out = fn(vs1, vs2);
+ // if any input or result is NaN, the result is quiet_NaN
+ if (std::isnan(alu_out) || std::isnan(vs1) || std::isnan(vs2)) {
+ // signaling_nan sets kInvalidOperation bit
+ if (isSnan(alu_out) || isSnan(vs1) || isSnan(vs2))
+ set_fflags(kInvalidOperation);
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ }
+ vd = alu_out;
+ })
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvFVF() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_FVF);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VFSGNJ_VF:
+ RVV_VI_VFP_VF_LOOP(
+ {}, { vd = fsgnj32(vs2, fs1, false, false); },
+ { vd = fsgnj64(vs2, fs1, false, false); })
+ break;
+ case RO_V_VFSGNJN_VF:
+ RVV_VI_VFP_VF_LOOP(
+ {}, { vd = fsgnj32(vs2, fs1, true, false); },
+ { vd = fsgnj64(vs2, fs1, true, false); })
+ break;
+ case RO_V_VFSGNJX_VF:
+ RVV_VI_VFP_VF_LOOP(
+ {}, { vd = fsgnj32(vs2, fs1, false, true); },
+ { vd = fsgnj64(vs2, fs1, false, true); })
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
void Simulator::DecodeVType() {
switch (instr_.InstructionBits() & (kFunct3Mask | kBaseOpcodeMask)) {
case OP_IVV:
DecodeRvvIVV();
return;
case OP_FVV:
- UNIMPLEMENTED_RISCV();
+ DecodeRvvFVV();
return;
case OP_MVV:
DecodeRvvMVV();
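Each arithmetic case in DecodeRvvFVV above (VFADD, VFSUB, VFMUL) repeats the same NaN-handling pattern: compute the raw result, raise kInvalidOperation only when a signaling NaN is involved, and canonicalize any NaN to the quiet NaN of the element type. A standalone distillation of that pattern for doubles, with illustrative helper names that are not part of the patch:

#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Illustrative stand-in for the simulator's isSnan helper: a signaling NaN
// has the most significant mantissa bit (bit 51 for doubles) cleared.
bool IsSignalingNan(double v) {
  if (!std::isnan(v)) return false;
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  return (bits & (uint64_t{1} << 51)) == 0;
}

// Compute-then-canonicalize step shared by the vector FP arithmetic cases:
// the invalid flag is set only for signaling NaNs, but any NaN operand or
// result forces the canonical quiet NaN.
double CanonicalizeBinaryOp(double lhs, double rhs, double raw_result,
                            bool* invalid_operation) {
  if (std::isnan(raw_result) || std::isnan(lhs) || std::isnan(rhs)) {
    if (IsSignalingNan(raw_result) || IsSignalingNan(lhs) ||
        IsSignalingNan(rhs)) {
      *invalid_operation = true;  // mirrors set_fflags(kInvalidOperation)
    }
    return std::numeric_limits<double>::quiet_NaN();
  }
  return raw_result;
}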
@@ -4839,9 +5386,9 @@ void Simulator::DecodeVType() {
} else {
avl = rvv_vl();
}
- avl = avl <= rvv_vlmax()
- ? avl
- : avl < (rvv_vlmax() * 2) ? avl / 2 : rvv_vlmax();
+ avl = avl <= rvv_vlmax() ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2
+ : rvv_vlmax();
set_rvv_vl(avl);
set_rd(rvv_vl());
rvv_trace_status();
@@ -4852,9 +5399,9 @@ void Simulator::DecodeVType() {
uint64_t avl;
set_rvv_vtype(rvv_zimm());
avl = instr_.Rvvuimm();
- avl = avl <= rvv_vlmax()
- ? avl
- : avl < (rvv_vlmax() * 2) ? avl / 2 : rvv_vlmax();
+ avl = avl <= rvv_vlmax() ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2
+ : rvv_vlmax();
set_rvv_vl(avl);
set_rd(rvv_vl());
rvv_trace_status();
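Both vsetvl paths above clamp the requested application vector length the same way: values up to VLMAX are kept as-is, values between VLMAX and 2*VLMAX are halved, and anything larger saturates at VLMAX. A minimal sketch of that rule, with an illustrative function name:

#include <cstdint>

uint64_t ClampAvl(uint64_t avl, uint64_t vlmax) {
  if (avl <= vlmax) return avl;         // requested length fits
  if (avl < 2 * vlmax) return avl / 2;  // between VLMAX and 2*VLMAX: halve
  return vlmax;                         // otherwise saturate at VLMAX
}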
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.h b/deps/v8/src/execution/riscv64/simulator-riscv64.h
index 90f0edec4c..fce6cdca0a 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.h
@@ -132,8 +132,11 @@ union u32_f32 {
inline float fsgnj32(float rs1, float rs2, bool n, bool x) {
u32_f32 a = {.f = rs1}, b = {.f = rs2};
u32_f32 res;
- res.u =
- (a.u & ~F32_SIGN) | ((((x) ? a.u : (n) ? F32_SIGN : 0) ^ b.u) & F32_SIGN);
+ res.u = (a.u & ~F32_SIGN) | ((((x) ? a.u
+ : (n) ? F32_SIGN
+ : 0) ^
+ b.u) &
+ F32_SIGN);
return res.f;
}
#define F64_SIGN ((uint64_t)1 << 63)
@@ -144,8 +147,11 @@ union u64_f64 {
inline double fsgnj64(double rs1, double rs2, bool n, bool x) {
u64_f64 a = {.d = rs1}, b = {.d = rs2};
u64_f64 res;
- res.u =
- (a.u & ~F64_SIGN) | ((((x) ? a.u : (n) ? F64_SIGN : 0) ^ b.u) & F64_SIGN);
+ res.u = (a.u & ~F64_SIGN) | ((((x) ? a.u
+ : (n) ? F64_SIGN
+ : 0) ^
+ b.u) &
+ F64_SIGN);
return res.d;
}
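fsgnj32 and fsgnj64 implement the three RISC-V sign-injection variants used by the vector cases above: FSGNJ takes rs2's sign bit, FSGNJN its negation, and FSGNJX the XOR of both sign bits, always keeping rs1's magnitude. A self-contained single-precision sketch of the same bit manipulation (the memcpy-based type punning and the function name are illustrative):

#include <cstdint>
#include <cstring>

float SignInject32(float rs1, float rs2, bool negate, bool xor_signs) {
  uint32_t a, b;
  std::memcpy(&a, &rs1, sizeof(a));
  std::memcpy(&b, &rs2, sizeof(b));
  const uint32_t kSign = 0x80000000u;
  // FSGNJX injects rs1's sign, FSGNJN a forced 1, FSGNJ a forced 0; the XOR
  // with rs2's sign then produces the final sign bit.
  const uint32_t injected = xor_signs ? a : (negate ? kSign : 0u);
  const uint32_t res = (a & ~kSign) | ((injected ^ b) & kSign);
  float out;
  std::memcpy(&out, &res, sizeof(out));
  return out;
}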
@@ -924,6 +930,22 @@ class Simulator : public SimulatorBase {
}
template <typename Func>
+ inline float CanonicalizeDoubleToFloatOperation(Func fn, double frs) {
+ float alu_out = fn(frs);
+    if (std::isnan(alu_out) || std::isnan(frs))
+ alu_out = std::numeric_limits<float>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
+ inline float CanonicalizeFloatToDoubleOperation(Func fn, float frs) {
+ double alu_out = fn(frs);
+    if (std::isnan(alu_out) || std::isnan(frs))
+ alu_out = std::numeric_limits<double>::quiet_NaN();
+ return alu_out;
+ }
+
+ template <typename Func>
inline float CanonicalizeFloatToDoubleOperation(Func fn) {
double alu_out = fn(frs1());
if (std::isnan(alu_out) || std::isnan(frs1()))
@@ -957,6 +979,8 @@ class Simulator : public SimulatorBase {
void DecodeRvvIVX();
void DecodeRvvMVV();
void DecodeRvvMVX();
+ void DecodeRvvFVV();
+ void DecodeRvvFVF();
bool DecodeRvvVL();
bool DecodeRvvVS();
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 31a03eed4e..4d386e65b8 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -2207,7 +2207,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_register(r2, result_buffer);
}
} else {
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ // FAST_C_CALL is temporarily handled here as well, because we lack
+ // proper support for direct C calls with FP params in the simulator.
+ // The generic BUILTIN_CALL path assumes all parameters are passed in
+        // the GP registers, so the slow callback can be called without
+        // crashing. This is sufficient because the mjsunit tests check
+        // `fast_c_api.supports_fp_params` (which is false on non-simulator
+        // builds for arm/arm64), so the slow path is expected to be taken.
+        // And since the slow path passes the arguments as a
+        // `const FunctionCallbackInfo<Value>&` (which is a GP argument),
+        // the call is made correctly.
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::FAST_C_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
diff --git a/deps/v8/src/execution/v8threads.cc b/deps/v8/src/execution/v8threads.cc
index 3138823f7b..9fb8f1c30c 100644
--- a/deps/v8/src/execution/v8threads.cc
+++ b/deps/v8/src/execution/v8threads.cc
@@ -20,7 +20,7 @@ namespace {
// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
-base::Atomic32 g_locker_was_ever_used_ = 0;
+base::AtomicWord g_locker_was_ever_used_ = 0;
} // namespace
@@ -53,8 +53,12 @@ bool Locker::IsLocked(v8::Isolate* isolate) {
return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}
-bool Locker::IsActive() {
- return !!base::Relaxed_Load(&g_locker_was_ever_used_);
+// static
+bool Locker::IsActive() { return WasEverUsed(); }
+
+// static
+bool Locker::WasEverUsed() {
+ return base::Relaxed_Load(&g_locker_was_ever_used_) != 0;
}
Locker::~Locker() {
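The new Locker::WasEverUsed() exposes the same process-wide information IsActive() reported, without the double negation over the relaxed atomic load, and IsActive() now simply forwards to it. A minimal embedder-side usage sketch (the wrapper function and the include form are illustrative):

#include "include/v8-locker.h"

bool EmbedderEverUsedLockers() {
  // True once any thread has constructed a v8::Locker in this process.
  return v8::Locker::WasEverUsed();
}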
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index ca8ed311a8..12ecfc9d45 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -211,6 +211,13 @@ struct MaybeBoolFlag {
#define ENABLE_SPARKPLUG_BY_DEFAULT false
#endif
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+// Must be enabled on M1.
+#define MUST_WRITE_PROTECT_CODE_MEMORY true
+#else
+#define MUST_WRITE_PROTECT_CODE_MEMORY false
+#endif
+
// Supported ARM configurations are:
// "armv6": ARMv6 + VFPv2
// "armv7": ARMv7 + VFPv3-D32 + NEON
@@ -292,7 +299,6 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS_BASE(V) \
- V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs_with_cleanup_some, \
"harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
V(harmony_import_assertions, "harmony import assertions") \
@@ -502,7 +508,6 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
-DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
@@ -510,6 +515,9 @@ DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
#if V8_SHORT_BUILTIN_CALLS
DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
#endif
+#if !MUST_WRITE_PROTECT_CODE_MEMORY
+DEFINE_WEAK_VALUE_IMPLICATION(future, write_protect_code_memory, false)
+#endif
// Flags for jitless
DEFINE_BOOL(jitless, V8_LITE_BOOL,
@@ -720,7 +728,7 @@ DEFINE_INT(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
-DEFINE_BOOL(concurrent_inlining, false,
+DEFINE_BOOL(concurrent_inlining, true,
"run optimizing compiler's inlining phase on a separate thread")
DEFINE_BOOL(
stress_concurrent_inlining, false,
@@ -947,6 +955,9 @@ DEFINE_BOOL(wasm_tier_up, true,
"have an effect)")
DEFINE_BOOL(wasm_dynamic_tiering, false,
"enable dynamic tier up to the optimizing compiler")
+DEFINE_INT(
+ wasm_caching_threshold, 1000000,
+ "the amount of wasm top tier code that triggers the next caching event")
DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_interpreter, false,
@@ -1181,7 +1192,12 @@ DEFINE_INT(scavenge_task_trigger, 80,
DEFINE_BOOL(scavenge_separate_stack_scanning, false,
"use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
+#if MUST_WRITE_PROTECT_CODE_MEMORY
+DEFINE_BOOL_READONLY(write_protect_code_memory, true,
+ "write protect code memory")
+#else
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
+#endif
#if defined(V8_ATOMIC_MARKING_STATE) && defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
#define V8_CONCURRENT_MARKING_BOOL true
#else
@@ -1803,6 +1819,8 @@ DEFINE_BOOL_READONLY(minor_mc, false,
//
DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
+DEFINE_BOOL(print_flag_values, false, "Print all flag values of V8")
+
DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
DEFINE_BOOL(slow_histograms, false,
"Enable slow histograms with more overhead.")
@@ -1984,7 +2002,9 @@ DEFINE_PERF_PROF_BOOL(
"Remove the perf file right after creating it (for testing only).")
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
// TODO(v8:8462) Remove implication once perf supports remapping.
+#if !MUST_WRITE_PROTECT_CODE_MEMORY
DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
+#endif
#if V8_ENABLE_WEBASSEMBLY
DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory)
#endif // V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/flags/flags.cc b/deps/v8/src/flags/flags.cc
index 4bf401b73c..66ad2974d0 100644
--- a/deps/v8/src/flags/flags.cc
+++ b/deps/v8/src/flags/flags.cc
@@ -9,6 +9,7 @@
#include <cinttypes>
#include <cstdlib>
#include <cstring>
+#include <iomanip>
#include <sstream>
#include "src/base/functional.h"
@@ -40,6 +41,8 @@ namespace internal {
namespace {
+char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }
+
struct Flag;
Flag* FindFlagByPointer(const void* ptr);
Flag* FindFlagByName(const char* name);
@@ -380,8 +383,6 @@ Flag flags[] = {
const size_t num_flags = sizeof(flags) / sizeof(*flags);
-inline char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }
-
bool EqualNames(const char* a, const char* b) {
for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
if (a[i] == '\0') {
@@ -429,7 +430,27 @@ static const char* Type2String(Flag::FlagType type) {
UNREACHABLE();
}
-std::ostream& operator<<(std::ostream& os, const Flag& flag) {
+// Helper struct for printing normalized Flag names.
+struct FlagName {
+ explicit FlagName(const Flag& flag) : flag(flag) {}
+ const Flag& flag;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagName& flag_name) {
+ for (const char* c = flag_name.flag.name(); *c != '\0'; ++c) {
+ os << NormalizeChar(*c);
+ }
+ return os;
+}
+
+// Helper for printing flag values.
+struct FlagValue {
+ explicit FlagValue(const Flag& flag) : flag(flag) {}
+ const Flag& flag;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagValue& flag_value) {
+ const Flag& flag = flag_value.flag;
switch (flag.type()) {
case Flag::TYPE_BOOL:
os << (flag.bool_variable() ? "true" : "false");
@@ -456,33 +477,20 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) {
break;
case Flag::TYPE_STRING: {
const char* str = flag.string_value();
- os << (str ? str : "nullptr");
+ os << std::quoted(str ? str : "");
break;
}
}
return os;
}
-// static
-std::vector<const char*>* FlagList::argv() {
- std::vector<const char*>* args = new std::vector<const char*>(8);
- for (size_t i = 0; i < num_flags; ++i) {
- Flag* f = &flags[i];
- if (!f->IsDefault()) {
- {
- bool disabled = f->type() == Flag::TYPE_BOOL && !f->bool_variable();
- std::ostringstream os;
- os << (disabled ? "--no" : "--") << f->name();
- args->push_back(StrDup(os.str().c_str()));
- }
- if (f->type() != Flag::TYPE_BOOL) {
- std::ostringstream os;
- os << *f;
- args->push_back(StrDup(os.str().c_str()));
- }
- }
+std::ostream& operator<<(std::ostream& os, const Flag& flag) {
+ if (flag.type() == Flag::TYPE_BOOL) {
+ os << (flag.bool_variable() ? "--" : "--no") << FlagName(flag);
+ } else {
+ os << "--" << FlagName(flag) << "=" << FlagValue(flag);
}
- return args;
+ return os;
}
// Helper function to parse flags: Takes an argument arg and splits it into
@@ -768,16 +776,20 @@ void FlagList::PrintHelp() {
os << "Options:\n";
for (const Flag& f : flags) {
- os << " --";
- for (const char* c = f.name(); *c != '\0'; ++c) {
- os << NormalizeChar(*c);
- }
- os << " (" << f.comment() << ")\n"
+ os << " --" << FlagName(f) << " (" << f.comment() << ")\n"
<< " type: " << Type2String(f.type()) << " default: " << f
<< "\n";
}
}
+// static
+void FlagList::PrintValues() {
+ StdoutStream os;
+ for (const Flag& f : flags) {
+ os << f << "\n";
+ }
+}
+
namespace {
static uint32_t flag_hash = 0;
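Together with the FlagName/FlagValue helpers and the rewritten Flag stream operator above, FlagList::PrintValues() dumps every flag in re-parsable form: boolean flags as --name or --noname, everything else as --name=value with strings quoted. A hedged sketch of a call site plus a few illustrative output lines; the actual call site is not part of this hunk:

// Somewhere during V8 initialization (illustrative guard, not from this diff):
if (FLAG_print_flag_values) FlagList::PrintValues();
// Possible output lines, based on defaults visible elsewhere in this patch:
//   --concurrent-inlining
//   --nowasm-dynamic-tiering
//   --wasm-caching-threshold=1000000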
diff --git a/deps/v8/src/flags/flags.h b/deps/v8/src/flags/flags.h
index 753da04c2d..07a29af5d4 100644
--- a/deps/v8/src/flags/flags.h
+++ b/deps/v8/src/flags/flags.h
@@ -19,15 +19,6 @@ namespace internal {
// The global list of all flags.
class V8_EXPORT_PRIVATE FlagList {
public:
- // The list of all flags with a value different from the default
- // and their values. The format of the list is like the format of the
- // argv array passed to the main function, e.g.
- // ("--prof", "--log-file", "v8.prof", "--nolazy").
- //
- // The caller is responsible for disposing the list, as well
- // as every element of it.
- static std::vector<const char*>* argv();
-
class HelpOptions {
public:
enum ExitBehavior : bool { kExit = true, kDontExit = false };
@@ -78,6 +69,8 @@ class V8_EXPORT_PRIVATE FlagList {
// Print help to stdout with flags, types, and default values.
static void PrintHelp();
+ static void PrintValues();
+
// Set flags as consequence of being implied by another flag.
static void EnforceFlagImplications();
diff --git a/deps/v8/src/handles/global-handles-inl.h b/deps/v8/src/handles/global-handles-inl.h
new file mode 100644
index 0000000000..1f86e2dcb4
--- /dev/null
+++ b/deps/v8/src/handles/global-handles-inl.h
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLES_GLOBAL_HANDLES_INL_H_
+#define V8_HANDLES_GLOBAL_HANDLES_INL_H_
+
+#include "src/handles/global-handles.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/heap-object-inl.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+Handle<T> GlobalHandles::Create(T value) {
+ static_assert(std::is_base_of<Object, T>::value, "static type violation");
+ // The compiler should only pick this method if T is not Object.
+ static_assert(!std::is_same<Object, T>::value, "compiler error");
+ return Handle<T>::cast(Create(Object(value)));
+}
+
+template <typename T>
+T GlobalHandleVector<T>::Pop() {
+ T obj = T::cast(Object(locations_.back()));
+ locations_.pop_back();
+ return obj;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLES_GLOBAL_HANDLES_INL_H_
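Moving the templated bodies into global-handles-inl.h keeps global-handles.h free of heap-object-inl.h; translation units that instantiate Create<T>() or Pop() include the -inl header instead. A rough usage sketch inside the V8 tree (the wrapper function and its includes are illustrative):

#include "src/execution/isolate.h"
#include "src/handles/global-handles-inl.h"

namespace v8 {
namespace internal {

// Creates a global handle that keeps `str` alive across GCs.
Handle<String> MakeGlobal(Isolate* isolate, String str) {
  return isolate->global_handles()->Create(str);
}

}  // namespace internal
}  // namespace v8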
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index d7f68e5b55..296b0704b2 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -15,6 +15,7 @@
#include "include/v8-profiler.h"
#include "src/handles/handles.h"
#include "src/heap/heap.h"
+#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
#include "src/utils/utils.h"
@@ -101,12 +102,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
Handle<Object> Create(Address value);
template <typename T>
- Handle<T> Create(T value) {
- static_assert(std::is_base_of<Object, T>::value, "static type violation");
- // The compiler should only pick this method if T is not Object.
- static_assert(!std::is_same<Object, T>::value, "compiler error");
- return Handle<T>::cast(Create(Object(value)));
- }
+ inline Handle<T> Create(T value);
Handle<Object> CreateTraced(Object value, Address* slot, bool has_destructor,
bool is_on_stack);
@@ -358,11 +354,7 @@ class GlobalHandleVector {
void Push(T val) { locations_.push_back(val.ptr()); }
// Handles into the GlobalHandleVector become invalid when they are removed,
// so "pop" returns a raw object rather than a handle.
- T Pop() {
- T obj = T::cast(Object(locations_.back()));
- locations_.pop_back();
- return obj;
- }
+ inline T Pop();
Iterator begin() { return Iterator(locations_.begin()); }
Iterator end() { return Iterator(locations_.end()); }
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index 2bdcec0bf7..cdab2a9aab 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -5,6 +5,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include <atomic>
+#include <memory>
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -24,7 +25,9 @@ void ArrayBufferList::Append(ArrayBufferExtension* extension) {
tail_ = extension;
}
- bytes_ += extension->accounting_length();
+ const size_t accounting_length = extension->accounting_length();
+ DCHECK_GE(bytes_ + accounting_length, bytes_);
+ bytes_ += accounting_length;
extension->set_next(nullptr);
}
@@ -41,115 +44,119 @@ void ArrayBufferList::Append(ArrayBufferList* list) {
DCHECK_NULL(list->tail_);
}
- bytes_ += list->Bytes();
- list->Reset();
+ bytes_ += list->ApproximateBytes();
+ *list = ArrayBufferList();
}
-bool ArrayBufferList::Contains(ArrayBufferExtension* extension) {
- ArrayBufferExtension* current = head_;
-
- while (current) {
+bool ArrayBufferList::ContainsSlow(ArrayBufferExtension* extension) const {
+ for (ArrayBufferExtension* current = head_; current;
+ current = current->next()) {
if (current == extension) return true;
- current = current->next();
}
-
return false;
}
-size_t ArrayBufferList::BytesSlow() {
+size_t ArrayBufferList::BytesSlow() const {
ArrayBufferExtension* current = head_;
size_t sum = 0;
-
while (current) {
sum += current->accounting_length();
current = current->next();
}
-
+ DCHECK_GE(sum, ApproximateBytes());
return sum;
}
+bool ArrayBufferList::IsEmpty() const {
+ DCHECK_IMPLIES(head_, tail_);
+ DCHECK_IMPLIES(!head_, bytes_ == 0);
+ return head_ == nullptr;
+}
+
+struct ArrayBufferSweeper::SweepingJob final {
+ SweepingJob(ArrayBufferList young, ArrayBufferList old, SweepingType type)
+ : state_(SweepingState::kInProgress),
+ young_(std::move(young)),
+ old_(std::move(old)),
+ type_(type) {}
+
+ void Sweep();
+ void SweepYoung();
+ void SweepFull();
+ ArrayBufferList SweepListFull(ArrayBufferList* list);
+
+ private:
+ CancelableTaskManager::Id id_ = CancelableTaskManager::kInvalidTaskId;
+ std::atomic<SweepingState> state_;
+ ArrayBufferList young_;
+ ArrayBufferList old_;
+ const SweepingType type_;
+ std::atomic<size_t> freed_bytes_{0};
+
+ friend class ArrayBufferSweeper;
+};
+
+ArrayBufferSweeper::ArrayBufferSweeper(Heap* heap) : heap_(heap) {}
+
+ArrayBufferSweeper::~ArrayBufferSweeper() {
+ EnsureFinished();
+ ReleaseAll(&old_);
+ ReleaseAll(&young_);
+}
+
void ArrayBufferSweeper::EnsureFinished() {
- if (!sweeping_in_progress_) return;
+ if (!sweeping_in_progress()) return;
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
TryAbortResult abort_result =
heap_->isolate()->cancelable_task_manager()->TryAbort(job_->id_);
switch (abort_result) {
- case TryAbortResult::kTaskAborted: {
+ case TryAbortResult::kTaskAborted:
+ // Task has not run, so we need to run it synchronously here.
job_->Sweep();
- Merge();
break;
- }
-
- case TryAbortResult::kTaskRemoved: {
- if (job_->state_ == SweepingState::kInProgress) job_->Sweep();
- if (job_->state_ == SweepingState::kDone) Merge();
+ case TryAbortResult::kTaskRemoved:
+      // Task was removed, but it did actually run; just make sure we end up
+      // in the correct state.
+ CHECK_EQ(SweepingState::kDone, job_->state_);
break;
- }
-
case TryAbortResult::kTaskRunning: {
+ // Task is running. Wait until task is finished with its work.
base::MutexGuard guard(&sweeping_mutex_);
- // Wait until task is finished with its work.
while (job_->state_ != SweepingState::kDone) {
job_finished_.Wait(&sweeping_mutex_);
}
- Merge();
break;
}
-
- default:
- UNREACHABLE();
}
- UpdateCountersForConcurrentlySweptExtensions();
+ Finalize();
DCHECK_LE(heap_->backing_store_bytes(), SIZE_MAX);
- sweeping_in_progress_ = false;
+ DCHECK(!sweeping_in_progress());
}
-void ArrayBufferSweeper::MergeBackExtensionsWhenSwept() {
- if (sweeping_in_progress_) {
- DCHECK(job_.has_value());
+void ArrayBufferSweeper::FinishIfDone() {
+ if (sweeping_in_progress()) {
+ DCHECK(job_);
if (job_->state_ == SweepingState::kDone) {
- Merge();
- sweeping_in_progress_ = false;
+ Finalize();
}
- // Update freed counters either way. It is necessary to update the counter
- // in case sweeping is done to avoid counter overflows.
- UpdateCountersForConcurrentlySweptExtensions();
}
}
-void ArrayBufferSweeper::UpdateCountersForConcurrentlySweptExtensions() {
- size_t freed_bytes = freed_bytes_.exchange(0, std::memory_order_relaxed);
- DecrementExternalMemoryCounters(freed_bytes);
-}
-
-void ArrayBufferSweeper::RequestSweepYoung() {
- RequestSweep(SweepingScope::kYoung);
-}
+void ArrayBufferSweeper::RequestSweep(SweepingType type) {
+ DCHECK(!sweeping_in_progress());
-void ArrayBufferSweeper::RequestSweepFull() {
- RequestSweep(SweepingScope::kFull);
-}
-
-size_t ArrayBufferSweeper::YoungBytes() { return young_bytes_; }
-
-size_t ArrayBufferSweeper::OldBytes() { return old_bytes_; }
-
-void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
- DCHECK(!sweeping_in_progress_);
-
- if (young_.IsEmpty() && (old_.IsEmpty() || scope == SweepingScope::kYoung))
+ if (young_.IsEmpty() && (old_.IsEmpty() || type == SweepingType::kYoung))
return;
+ Prepare(type);
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
FLAG_concurrent_array_buffer_sweeping) {
- Prepare(scope);
-
- auto task = MakeCancelableTask(heap_->isolate(), [this, scope] {
+ auto task = MakeCancelableTask(heap_->isolate(), [this, type] {
GCTracer::Scope::ScopeId scope_id =
- scope == SweepingScope::kYoung
+ type == SweepingType::kYoung
? GCTracer::Scope::BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP
: GCTracer::Scope::BACKGROUND_FULL_ARRAY_BUFFER_SWEEP;
TRACE_GC_EPOCH(heap_->tracer(), scope_id, ThreadKind::kBackground);
@@ -159,74 +166,64 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
});
job_->id_ = task->id();
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- sweeping_in_progress_ = true;
} else {
- Prepare(scope);
job_->Sweep();
- Merge();
- UpdateCountersForConcurrentlySweptExtensions();
+ Finalize();
}
}
-void ArrayBufferSweeper::Prepare(SweepingScope scope) {
- DCHECK(!job_.has_value());
-
- if (scope == SweepingScope::kYoung) {
- job_.emplace(this, young_, ArrayBufferList(), SweepingScope::kYoung);
- young_.Reset();
- young_bytes_ = 0;
- } else {
- CHECK_EQ(scope, SweepingScope::kFull);
- job_.emplace(this, young_, old_, SweepingScope::kFull);
- young_.Reset();
- old_.Reset();
- young_bytes_ = old_bytes_ = 0;
+void ArrayBufferSweeper::Prepare(SweepingType type) {
+ DCHECK(!sweeping_in_progress());
+ switch (type) {
+ case SweepingType::kYoung: {
+ job_ = std::make_unique<SweepingJob>(std::move(young_), ArrayBufferList(),
+ type);
+ young_ = ArrayBufferList();
+ } break;
+ case SweepingType::kFull: {
+ job_ = std::make_unique<SweepingJob>(std::move(young_), std::move(old_),
+ type);
+ young_ = ArrayBufferList();
+ old_ = ArrayBufferList();
+ } break;
}
+ DCHECK(sweeping_in_progress());
}
-void ArrayBufferSweeper::Merge() {
- DCHECK(job_.has_value());
+void ArrayBufferSweeper::Finalize() {
+ DCHECK(sweeping_in_progress());
CHECK_EQ(job_->state_, SweepingState::kDone);
young_.Append(&job_->young_);
old_.Append(&job_->old_);
- young_bytes_ = young_.Bytes();
- old_bytes_ = old_.Bytes();
-
+ const size_t freed_bytes =
+ job_->freed_bytes_.exchange(0, std::memory_order_relaxed);
+ DecrementExternalMemoryCounters(freed_bytes);
job_.reset();
-}
-
-void ArrayBufferSweeper::ReleaseAll() {
- EnsureFinished();
- ReleaseAll(&old_);
- ReleaseAll(&young_);
- old_bytes_ = young_bytes_ = 0;
+ DCHECK(!sweeping_in_progress());
}
void ArrayBufferSweeper::ReleaseAll(ArrayBufferList* list) {
ArrayBufferExtension* current = list->head_;
-
while (current) {
ArrayBufferExtension* next = current->next();
delete current;
current = next;
}
-
- list->Reset();
+ *list = ArrayBufferList();
}
void ArrayBufferSweeper::Append(JSArrayBuffer object,
ArrayBufferExtension* extension) {
size_t bytes = extension->accounting_length();
+ FinishIfDone();
+
if (Heap::InYoungGeneration(object)) {
young_.Append(extension);
- young_bytes_ += bytes;
} else {
old_.Append(extension);
- old_bytes_ += bytes;
}
- MergeBackExtensionsWhenSwept();
IncrementExternalMemoryCounters(bytes);
}
@@ -237,21 +234,21 @@ void ArrayBufferSweeper::Detach(JSArrayBuffer object,
// We cannot free the extension eagerly here, since extensions are tracked in
// a singly linked list. The next GC will remove it automatically.
- if (!sweeping_in_progress_) {
+ FinishIfDone();
+
+ if (!sweeping_in_progress()) {
// If concurrent sweeping isn't running at the moment, we can also adjust
- // young_bytes_ or old_bytes_ right away.
+    // the respective bytes in the corresponding ArrayBufferLists as they are
+ // only approximate.
if (Heap::InYoungGeneration(object)) {
- DCHECK_GE(young_bytes_, bytes);
- young_bytes_ -= bytes;
+ DCHECK_GE(young_.bytes_, bytes);
young_.bytes_ -= bytes;
} else {
- DCHECK_GE(old_bytes_, bytes);
- old_bytes_ -= bytes;
+ DCHECK_GE(old_.bytes_, bytes);
old_.bytes_ -= bytes;
}
}
- MergeBackExtensionsWhenSwept();
DecrementExternalMemoryCounters(bytes);
}
@@ -272,29 +269,25 @@ void ArrayBufferSweeper::DecrementExternalMemoryCounters(size_t bytes) {
heap_->update_external_memory(-static_cast<int64_t>(bytes));
}
-void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
- if (bytes == 0) return;
- freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
-}
-
void ArrayBufferSweeper::SweepingJob::Sweep() {
CHECK_EQ(state_, SweepingState::kInProgress);
-
- if (scope_ == SweepingScope::kYoung) {
- SweepYoung();
- } else {
- CHECK_EQ(scope_, SweepingScope::kFull);
- SweepFull();
+ switch (type_) {
+ case SweepingType::kYoung:
+ SweepYoung();
+ break;
+ case SweepingType::kFull:
+ SweepFull();
+ break;
}
state_ = SweepingState::kDone;
}
void ArrayBufferSweeper::SweepingJob::SweepFull() {
- CHECK_EQ(scope_, SweepingScope::kFull);
+ DCHECK_EQ(SweepingType::kFull, type_);
ArrayBufferList promoted = SweepListFull(&young_);
ArrayBufferList survived = SweepListFull(&old_);
- old_ = promoted;
+ old_ = std::move(promoted);
old_.Append(&survived);
}
@@ -307,9 +300,9 @@ ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
ArrayBufferExtension* next = current->next();
if (!current->IsMarked()) {
- size_t bytes = current->accounting_length();
+ const size_t bytes = current->accounting_length();
delete current;
- sweeper_->IncrementFreedBytes(bytes);
+ if (bytes) freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
} else {
current->Unmark();
survivor_list.Append(current);
@@ -318,12 +311,12 @@ ArrayBufferList ArrayBufferSweeper::SweepingJob::SweepListFull(
current = next;
}
- list->Reset();
+ *list = ArrayBufferList();
return survivor_list;
}
void ArrayBufferSweeper::SweepingJob::SweepYoung() {
- CHECK_EQ(scope_, SweepingScope::kYoung);
+ DCHECK_EQ(SweepingType::kYoung, type_);
ArrayBufferExtension* current = young_.head_;
ArrayBufferList new_young;
@@ -335,7 +328,7 @@ void ArrayBufferSweeper::SweepingJob::SweepYoung() {
if (!current->IsYoungMarked()) {
size_t bytes = current->accounting_length();
delete current;
- sweeper_->IncrementFreedBytes(bytes);
+ if (bytes) freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
} else if (current->IsYoungPromoted()) {
current->YoungUnmark();
new_old.Append(current);
diff --git a/deps/v8/src/heap/array-buffer-sweeper.h b/deps/v8/src/heap/array-buffer-sweeper.h
index 6dd7ed97f6..14360dd67f 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.h
+++ b/deps/v8/src/heap/array-buffer-sweeper.h
@@ -5,6 +5,9 @@
#ifndef V8_HEAP_ARRAY_BUFFER_SWEEPER_H_
#define V8_HEAP_ARRAY_BUFFER_SWEEPER_H_
+#include <memory>
+
+#include "src/base/logging.h"
#include "src/base/platform/mutex.h"
#include "src/objects/js-array-buffer.h"
#include "src/tasks/cancelable-task.h"
@@ -17,47 +20,38 @@ class Heap;
// Singly linked-list of ArrayBufferExtensions that stores head and tail of the
// list to allow for concatenation of lists.
-struct ArrayBufferList {
- ArrayBufferList() : head_(nullptr), tail_(nullptr), bytes_(0) {}
-
- ArrayBufferExtension* head_;
- ArrayBufferExtension* tail_;
- size_t bytes_;
-
- bool IsEmpty() {
- DCHECK_IMPLIES(head_, tail_);
- return head_ == nullptr;
- }
-
- size_t Bytes() { return bytes_; }
- size_t BytesSlow();
-
- void Reset() {
- head_ = tail_ = nullptr;
- bytes_ = 0;
- }
+struct ArrayBufferList final {
+ bool IsEmpty() const;
+ size_t ApproximateBytes() const { return bytes_; }
+ size_t BytesSlow() const;
void Append(ArrayBufferExtension* extension);
void Append(ArrayBufferList* list);
- V8_EXPORT_PRIVATE bool Contains(ArrayBufferExtension* extension);
+ V8_EXPORT_PRIVATE bool ContainsSlow(ArrayBufferExtension* extension) const;
+
+ private:
+ ArrayBufferExtension* head_ = nullptr;
+ ArrayBufferExtension* tail_ = nullptr;
+ // Bytes are approximate as they may be subtracted eagerly, while the
+ // `ArrayBufferExtension` is still in the list. The extension will only be
+ // dropped on next sweep.
+ size_t bytes_ = 0;
+
+ friend class ArrayBufferSweeper;
};
// The ArrayBufferSweeper iterates and deletes ArrayBufferExtensions
// concurrently to the application.
-class ArrayBufferSweeper {
+class ArrayBufferSweeper final {
public:
- explicit ArrayBufferSweeper(Heap* heap)
- : heap_(heap),
- sweeping_in_progress_(false),
- freed_bytes_(0),
- young_bytes_(0),
- old_bytes_(0) {}
- ~ArrayBufferSweeper() { ReleaseAll(); }
+ enum class SweepingType { kYoung, kFull };
+ explicit ArrayBufferSweeper(Heap* heap);
+ ~ArrayBufferSweeper();
+
+ void RequestSweep(SweepingType sweeping_type);
void EnsureFinished();
- void RequestSweepYoung();
- void RequestSweepFull();
// Track the given ArrayBufferExtension for the given JSArrayBuffer.
void Append(JSArrayBuffer object, ArrayBufferExtension* extension);
@@ -65,70 +59,40 @@ class ArrayBufferSweeper {
// Detaches an ArrayBufferExtension from a JSArrayBuffer.
void Detach(JSArrayBuffer object, ArrayBufferExtension* extension);
- ArrayBufferList young() { return young_; }
- ArrayBufferList old() { return old_; }
+ const ArrayBufferList& young() const { return young_; }
+ const ArrayBufferList& old() const { return old_; }
- size_t YoungBytes();
- size_t OldBytes();
+ // Bytes accounted in the young generation. Rebuilt during sweeping.
+ size_t YoungBytes() const { return young().ApproximateBytes(); }
+ // Bytes accounted in the old generation. Rebuilt during sweeping.
+ size_t OldBytes() const { return old().ApproximateBytes(); }
private:
- enum class SweepingScope { kYoung, kFull };
+ struct SweepingJob;
enum class SweepingState { kInProgress, kDone };
- struct SweepingJob {
- ArrayBufferSweeper* sweeper_;
- CancelableTaskManager::Id id_;
- std::atomic<SweepingState> state_;
- ArrayBufferList young_;
- ArrayBufferList old_;
- SweepingScope scope_;
-
- SweepingJob(ArrayBufferSweeper* sweeper, ArrayBufferList young,
- ArrayBufferList old, SweepingScope scope)
- : sweeper_(sweeper),
- id_(0),
- state_(SweepingState::kInProgress),
- young_(young),
- old_(old),
- scope_(scope) {}
-
- void Sweep();
- void SweepYoung();
- void SweepFull();
- ArrayBufferList SweepListFull(ArrayBufferList* list);
- };
-
- base::Optional<SweepingJob> job_;
-
- void Merge();
- void MergeBackExtensionsWhenSwept();
-
- void UpdateCountersForConcurrentlySweptExtensions();
+ bool sweeping_in_progress() const { return job_.get(); }
+
+  // Finalizes the sweeping job in case it has already completed.
+ void FinishIfDone();
+
+ // Increments external memory counters outside of ArrayBufferSweeper.
+ // Increment may trigger GC.
void IncrementExternalMemoryCounters(size_t bytes);
void DecrementExternalMemoryCounters(size_t bytes);
- void IncrementFreedBytes(size_t bytes);
- void RequestSweep(SweepingScope sweeping_task);
- void Prepare(SweepingScope sweeping_task);
+ void Prepare(SweepingType type);
+ void Finalize();
- ArrayBufferList SweepYoungGen();
- void SweepOldGen(ArrayBufferExtension* extension);
-
- void ReleaseAll();
void ReleaseAll(ArrayBufferList* extension);
Heap* const heap_;
- bool sweeping_in_progress_;
+ std::unique_ptr<SweepingJob> job_;
base::Mutex sweeping_mutex_;
base::ConditionVariable job_finished_;
- std::atomic<size_t> freed_bytes_;
-
ArrayBufferList young_;
ArrayBufferList old_;
-
- size_t young_bytes_;
- size_t old_bytes_;
};
} // namespace internal
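Call sites that previously used RequestSweepYoung() and RequestSweepFull() now name the sweeping type explicitly through the public SweepingType enum. A sketch of the updated call shape; the real call sites are not shown in this diff:

#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap.h"

namespace v8 {
namespace internal {

void SweepArrayBuffersAfterGC(Heap* heap, bool is_full_gc) {
  heap->array_buffer_sweeper()->RequestSweep(
      is_full_gc ? ArrayBufferSweeper::SweepingType::kFull
                 : ArrayBufferSweeper::SweepingType::kYoung);
}

}  // namespace internal
}  // namespace v8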
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 0dfe024db9..eba77baf77 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -31,6 +31,7 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
+#include "src/objects/visitors.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"
@@ -87,11 +88,13 @@ class ConcurrentMarkingVisitor final
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool embedder_tracing_enabled, bool is_forced_gc,
+ bool embedder_tracing_enabled,
+ bool should_keep_ages_unchanged,
MemoryChunkDataMap* memory_chunk_data)
: MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, code_flush_mode,
- embedder_tracing_enabled, is_forced_gc),
+ embedder_tracing_enabled,
+ should_keep_ages_unchanged),
marking_state_(memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
@@ -168,28 +171,28 @@ class ConcurrentMarkingVisitor final
private:
// Helper class for collecting in-object slot addresses and values.
- class SlotSnapshottingVisitor final : public ObjectVisitor {
+ class SlotSnapshottingVisitor final : public ObjectVisitorWithCageBases {
public:
- explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
- : slot_snapshot_(slot_snapshot) {
+ explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot,
+ PtrComprCageBase cage_base,
+ PtrComprCageBase code_cage_base)
+ : ObjectVisitorWithCageBases(cage_base, code_cage_base),
+ slot_snapshot_(slot_snapshot) {
slot_snapshot_->clear();
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
- PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (ObjectSlot p = start; p < end; ++p) {
- Object object = p.Relaxed_Load(cage_base);
+ Object object = p.Relaxed_Load(cage_base());
slot_snapshot_->add(p, object);
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object code = slot.Relaxed_Load(code_cage_base);
- slot_snapshot_->add(slot, code);
+ Object code = slot.Relaxed_Load(code_cage_base());
+ slot_snapshot_->add(ObjectSlot(slot.address()), code);
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
@@ -280,7 +283,8 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor>
const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
- SlotSnapshottingVisitor visitor(&slot_snapshot_);
+ SlotSnapshottingVisitor visitor(&slot_snapshot_, cage_base(),
+ code_cage_base());
visitor.VisitPointer(object, object.map_slot());
TBodyDescriptor::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
@@ -368,11 +372,12 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
class ConcurrentMarking::JobTask : public v8::JobTask {
public:
JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
- base::EnumSet<CodeFlushMode> code_flush_mode, bool is_forced_gc)
+ base::EnumSet<CodeFlushMode> code_flush_mode,
+ bool should_keep_ages_unchanged)
: concurrent_marking_(concurrent_marking),
mark_compact_epoch_(mark_compact_epoch),
code_flush_mode_(code_flush_mode),
- is_forced_gc_(is_forced_gc) {}
+ should_keep_ages_unchanged_(should_keep_ages_unchanged) {}
~JobTask() override = default;
JobTask(const JobTask&) = delete;
@@ -383,13 +388,13 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
if (delegate->IsJoiningThread()) {
// TRACE_GC is not needed here because the caller opens the right scope.
concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
- is_forced_gc_);
+ should_keep_ages_unchanged_);
} else {
TRACE_GC_EPOCH(concurrent_marking_->heap_->tracer(),
GCTracer::Scope::MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
concurrent_marking_->Run(delegate, code_flush_mode_, mark_compact_epoch_,
- is_forced_gc_);
+ should_keep_ages_unchanged_);
}
}
@@ -401,7 +406,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
ConcurrentMarking* concurrent_marking_;
const unsigned mark_compact_epoch_;
base::EnumSet<CodeFlushMode> code_flush_mode_;
- const bool is_forced_gc_;
+ const bool should_keep_ages_unchanged_;
};
ConcurrentMarking::ConcurrentMarking(Heap* heap,
@@ -422,7 +427,8 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
void ConcurrentMarking::Run(JobDelegate* delegate,
base::EnumSet<CodeFlushMode> code_flush_mode,
- unsigned mark_compact_epoch, bool is_forced_gc) {
+ unsigned mark_compact_epoch,
+ bool should_keep_ages_unchanged) {
size_t kBytesUntilInterruptCheck = 64 * KB;
int kObjectsUntilInterrupCheck = 1000;
uint8_t task_id = delegate->GetTaskId() + 1;
@@ -431,7 +437,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
mark_compact_epoch, code_flush_mode,
- heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
+ heap_->local_embedder_heap_tracer()->InUse(), should_keep_ages_unchanged,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
task_state->native_context_inferrer;
@@ -575,7 +581,7 @@ void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
priority, std::make_unique<JobTask>(
this, heap_->mark_compact_collector()->epoch(),
heap_->mark_compact_collector()->code_flush_mode(),
- heap_->is_current_gc_forced()));
+ heap_->ShouldCurrentGCKeepAgesUnchanged()));
DCHECK(job_handle_->IsValid());
}
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 87d39ccdeb..12ee70da56 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -108,7 +108,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
};
class JobTask;
void Run(JobDelegate* delegate, base::EnumSet<CodeFlushMode> code_flush_mode,
- unsigned mark_compact_epoch, bool is_forced_gc);
+ unsigned mark_compact_epoch, bool should_keep_ages_unchanged);
size_t GetMaxConcurrency(size_t worker_count);
std::unique_ptr<JobHandle> job_handle_;
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index c21d1ceb50..5e90039f40 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -254,6 +254,7 @@ void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
if (incremental_mark_batched_events_.events.size() == kMaxBatchedEvents) {
recorder->AddMainThreadEvent(std::move(incremental_mark_batched_events_),
GetContextId());
+ incremental_mark_batched_events_ = {};
}
}
@@ -272,6 +273,7 @@ void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
if (incremental_sweep_batched_events_.events.size() == kMaxBatchedEvents) {
recorder->AddMainThreadEvent(std::move(incremental_sweep_batched_events_),
GetContextId());
+ incremental_sweep_batched_events_ = {};
}
}
@@ -282,10 +284,12 @@ void CppHeap::MetricRecorderAdapter::FlushBatchedIncrementalEvents() {
if (!incremental_mark_batched_events_.events.empty()) {
recorder->AddMainThreadEvent(std::move(incremental_mark_batched_events_),
GetContextId());
+ incremental_mark_batched_events_ = {};
}
if (!incremental_sweep_batched_events_.events.empty()) {
recorder->AddMainThreadEvent(std::move(incremental_sweep_batched_events_),
GetContextId());
+ incremental_sweep_batched_events_ = {};
}
}
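The added `incremental_*_batched_events_ = {};` assignments matter because a moved-from container is only guaranteed to be in a valid but unspecified state; the explicit reset ensures the next batch starts from an empty event list. A minimal self-contained illustration of the idiom:

#include <cassert>
#include <utility>
#include <vector>

struct BatchedEvents {
  std::vector<int> events;
};

int main() {
  BatchedEvents batched;
  batched.events = {1, 2, 3};
  BatchedEvents sink = std::move(batched);  // hand the batch off
  batched = {};  // explicit reset, as done for the batched event lists above
  assert(batched.events.empty());
  return static_cast<int>(sink.events.size());
}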
diff --git a/deps/v8/src/heap/cppgc/allocation.cc b/deps/v8/src/heap/cppgc/allocation.cc
index 22f4703982..ee4693c7f0 100644
--- a/deps/v8/src/heap/cppgc/allocation.cc
+++ b/deps/v8/src/heap/cppgc/allocation.cc
@@ -9,20 +9,30 @@
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/object-allocator.h"
+#if defined(__clang__) && !defined(DEBUG) && V8_HAS_ATTRIBUTE_ALWAYS_INLINE
+#define CPPGC_FORCE_ALWAYS_INLINE __attribute__((always_inline))
+#else
+#define CPPGC_FORCE_ALWAYS_INLINE
+#endif
+
namespace cppgc {
namespace internal {
STATIC_ASSERT(api_constants::kLargeObjectSizeThreshold ==
kLargeObjectSizeThreshold);
+// Using CPPGC_FORCE_ALWAYS_INLINE to guide LTO for inlining the allocation
+// fast path.
// static
-void* MakeGarbageCollectedTraitInternal::Allocate(
+CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index) {
return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index);
}
+// Using CPPGC_FORCE_ALWAYS_INLINE to guide LTO for inlining the allocation
+// fast path.
// static
-void* MakeGarbageCollectedTraitInternal::Allocate(
+CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index,
CustomSpaceIndex space_index) {
return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index,
diff --git a/deps/v8/src/heap/cppgc/caged-heap.cc b/deps/v8/src/heap/cppgc/caged-heap.cc
index 2b5fed4af5..4ba7a36bea 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap.cc
@@ -50,7 +50,9 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
public:
CppgcBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size)
- : BoundedPageAllocator(page_allocator, start, size, allocate_page_size) {}
+ : BoundedPageAllocator(page_allocator, start, size, allocate_page_size,
+ v8::base::PageInitializationMode::
+ kAllocatedPagesCanBeUninitialized) {}
bool FreePages(void* address, size_t size) final {
// BoundedPageAllocator is not guaranteed to allocate zeroed page.
@@ -61,7 +63,7 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
// contents. To mitigate this problem, CppgcBoundedPageAllocator clears all
// pages before they are freed. This also includes protected guard pages, so
// CppgcBoundedPageAllocator needs to update permissions before clearing.
- SetPermissions(address, size, Permission::kReadWrite);
+ CHECK(SetPermissions(address, size, Permission::kReadWrite));
memset(address, 0, size);
return v8::base::BoundedPageAllocator::FreePages(address, size);
}
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 6196955a3e..f350a99d01 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -206,6 +206,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
stats_collector_->SetMetricRecorder(std::move(histogram_recorder));
}
+ int GetCreationThreadId() const { return creation_thread_id_; }
+
protected:
// Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
@@ -270,6 +272,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
bool in_atomic_pause_ = false;
+ int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
+
friend class MarkerBase::IncrementalMarkingTask;
friend class testing::TestWithHeap;
friend class cppgc::subtle::DisallowGarbageCollectionScope;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index 97a65fbf20..f1d67df8b5 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -120,14 +120,14 @@ class HeapObjectHeader {
static constexpr size_t DecodeSize(uint16_t encoded) {
// Essentially, gets optimized to << 1.
- using SizeField = MarkBitField::Next<size_t, 15>;
- return SizeField::decode(encoded) * kAllocationGranularity;
+ using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
+ return SizeFieldImpl::decode(encoded) * kAllocationGranularity;
}
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
- using SizeField = MarkBitField::Next<size_t, 15>;
- return SizeField::encode(size / kAllocationGranularity);
+ using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
+ return SizeFieldImpl::encode(size / kAllocationGranularity);
}
V8_EXPORT_PRIVATE void CheckApiConstants();
diff --git a/deps/v8/src/heap/cppgc/heap-statistics-collector.cc b/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
index 5833211fcb..7c91bbd4f5 100644
--- a/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
+++ b/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
@@ -73,7 +73,7 @@ void FinalizeSpace(HeapStatistics* stats,
}
void RecordObjectType(
- std::unordered_map<const char*, size_t>& type_map,
+ std::unordered_map<const void*, size_t>& type_map,
std::vector<HeapStatistics::ObjectStatsEntry>& object_statistics,
HeapObjectHeader* header, size_t object_size) {
if (!NameProvider::HideInternalNames()) {
@@ -109,7 +109,7 @@ HeapStatistics HeapStatisticsCollector::CollectDetailedStatistics(
if (!NameProvider::HideInternalNames()) {
stats.type_names.resize(type_name_to_index_map_.size());
for (auto& it : type_name_to_index_map_) {
- stats.type_names[it.second] = it.first;
+ stats.type_names[it.second] = reinterpret_cast<const char*>(it.first);
}
}
diff --git a/deps/v8/src/heap/cppgc/heap-statistics-collector.h b/deps/v8/src/heap/cppgc/heap-statistics-collector.h
index c0b1fe7c63..1e492bfe7f 100644
--- a/deps/v8/src/heap/cppgc/heap-statistics-collector.h
+++ b/deps/v8/src/heap/cppgc/heap-statistics-collector.h
@@ -30,10 +30,10 @@ class HeapStatisticsCollector : private HeapVisitor<HeapStatisticsCollector> {
HeapStatistics::SpaceStatistics* current_space_stats_ = nullptr;
HeapStatistics::PageStatistics* current_page_stats_ = nullptr;
// Index from type name to final index in `HeapStats::type_names`.
- // Canonicalizing based on `const char*` assuming stable addresses. If the
+ // Canonicalizing based on `const void*` assuming stable addresses. If the
// implementation of `NameProvider` decides to return different type name
// c-strings, the final outcome is less compact.
- std::unordered_map<const char*, size_t> type_name_to_index_map_;
+ std::unordered_map<const void*, size_t> type_name_to_index_map_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 88f2f3c608..c2bbba1076 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -214,7 +214,7 @@ void MarkerBase::StartMarking() {
is_marking_ = true;
if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
- StatsCollector::EnabledScope stats_scope(
+ StatsCollector::EnabledScope inner_stats_scope(
heap().stats_collector(), StatsCollector::kMarkIncrementalStart);
// Performing incremental or concurrent marking.
@@ -243,13 +243,6 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
}
config_.stack_state = stack_state;
config_.marking_type = MarkingConfig::MarkingType::kAtomic;
- mutator_marking_state_.set_in_atomic_pause();
-
- // Lock guards against changes to {Weak}CrossThreadPersistent handles, that
- // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
- // converted into a CrossThreadPersistent which requires that the handle
- // is either cleared or the object is retained.
- g_process_mutex.Pointer()->Lock();
{
// VisitRoots also resets the LABs.
@@ -308,6 +301,7 @@ void MarkerBase::ProcessWeakness() {
heap().GetWeakPersistentRegion().Trace(&visitor());
// Processing cross-thread handles requires taking the process lock.
g_process_mutex.Get().AssertHeld();
+ CHECK(visited_cross_thread_persistents_in_atomic_pause_);
heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());
// Call weak callbacks on objects that may now be pointing to dead objects.
@@ -337,13 +331,6 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
heap().GetStrongPersistentRegion().Trace(&visitor());
}
- if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
- StatsCollector::DisabledScope inner_stats_scope(
- heap().stats_collector(),
- StatsCollector::kMarkVisitCrossThreadPersistents);
- g_process_mutex.Get().AssertHeld();
- heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
- }
}
if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
@@ -356,6 +343,24 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
}
}
+bool MarkerBase::VisitCrossThreadPersistentsIfNeeded() {
+ if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
+ visited_cross_thread_persistents_in_atomic_pause_)
+ return false;
+
+ StatsCollector::DisabledScope inner_stats_scope(
+ heap().stats_collector(),
+ StatsCollector::kMarkVisitCrossThreadPersistents);
+  // Lock guards against changes to {Weak}CrossThreadPersistent handles that
+ // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
+ // converted into a CrossThreadPersistent which requires that the handle
+ // is either cleared or the object is retained.
+ g_process_mutex.Pointer()->Lock();
+ heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
+ visited_cross_thread_persistents_in_atomic_pause_ = true;
+ return (heap().GetStrongCrossThreadPersistentRegion().NodesInUse() > 0);
+}
+
void MarkerBase::ScheduleIncrementalMarkingTask() {
DCHECK(platform_);
if (!foreground_task_runner_ || incremental_marking_handle_) return;
@@ -400,8 +405,13 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
heap().stats_collector(),
StatsCollector::kMarkTransitiveClosureWithDeadline, "deadline_ms",
max_duration.InMillisecondsF());
- is_done = ProcessWorklistsWithDeadline(
- marked_bytes_limit, v8::base::TimeTicks::Now() + max_duration);
+ const auto deadline = v8::base::TimeTicks::Now() + max_duration;
+ is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
+ if (is_done && VisitCrossThreadPersistentsIfNeeded()) {
+ // Both limits are absolute and hence can be passed along without further
+ // adjustment.
+ is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
+ }
schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
@@ -515,7 +525,7 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
saved_did_discover_new_ephemeron_pairs =
mutator_marking_state_.DidDiscoverNewEphemeronPairs();
{
- StatsCollector::EnabledScope stats_scope(
+ StatsCollector::EnabledScope inner_stats_scope(
heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 1b41d0b6e8..c18973e235 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -164,6 +164,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void VisitRoots(MarkingConfig::StackState);
+ bool VisitCrossThreadPersistentsIfNeeded();
+
void MarkNotFullyConstructedObjects();
void ScheduleIncrementalMarkingTask();
@@ -186,6 +188,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
bool main_marking_disabled_for_testing_{false};
+ bool visited_cross_thread_persistents_in_atomic_pause_{false};
friend class MarkerFactory;
};
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 5f6f0aba37..864c8209b7 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -9,7 +9,6 @@
#include "include/cppgc/trace-trait.h"
#include "include/cppgc/visitor.h"
-#include "src/base/logging.h"
#include "src/heap/cppgc/compaction-worklists.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -124,8 +123,6 @@ class MarkingStateBase {
discovered_new_ephemeron_pairs_ = false;
}
- void set_in_atomic_pause() { in_atomic_pause_ = true; }
-
protected:
inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
@@ -163,7 +160,6 @@ class MarkingStateBase {
size_t marked_bytes_ = 0;
bool in_ephemeron_processing_ = false;
bool discovered_new_ephemeron_pairs_ = false;
- bool in_atomic_pause_ = false;
};
MarkingStateBase::MarkingStateBase(HeapBase& heap,
@@ -304,19 +300,12 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
// would break the main marking loop.
DCHECK(!in_ephemeron_processing_);
in_ephemeron_processing_ = true;
- // Keys are considered live even in incremental/concurrent marking settings
- // because the write barrier for WeakMember ensures that any newly set value
- // after this point is kept alive and does not require the callback.
- const bool key_in_construction =
- HeapObjectHeader::FromObject(key).IsInConstruction<AccessMode::kAtomic>();
- const bool key_considered_as_live =
- key_in_construction
- ? in_atomic_pause_
- : HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>();
- DCHECK_IMPLIES(
- key_in_construction && in_atomic_pause_,
- HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>());
- if (key_considered_as_live) {
+ // Filter out already marked keys. The write barrier for WeakMember
+ // ensures that any newly set value after this point is kept alive and does
+ // not require the callback.
+ if (!HeapObjectHeader::FromObject(key)
+ .IsInConstruction<AccessMode::kAtomic>() &&
+ HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>()) {
if (value_desc.base_object_payload) {
MarkAndPush(value_desc.base_object_payload, value_desc);
} else {
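The rewritten condition implements a simpler ephemeron rule: the value is traced immediately only when the key is fully constructed and already marked. A hedged sketch of that rule follows; the fallback path for unmarked keys is outside this hunk and only assumed here.

struct KeyHeader {
  bool in_construction;  // Object is not fully constructed yet.
  bool marked;           // Already marked live by the collector.
};

// Returns true when the value was traced right away; false means the pair has
// to be revisited later (e.g. once the key gets marked).
bool ProcessEphemeronPair(const KeyHeader& key, bool& value_marked) {
  if (!key.in_construction && key.marked) {
    value_marked = true;  // A live key keeps the value alive now.
    return true;
  }
  return false;
}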
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 0f85d43c1c..43e7c9b79a 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -59,9 +59,11 @@ void AddToFreeList(NormalPageSpace& space, Address start, size_t size) {
// No need for SetMemoryInaccessible() as LAB memory is retrieved as free
// inaccessible memory.
space.free_list().Add({start, size});
+ // Concurrent marking may be running while the LAB is set up next to a live
+ // object sharing the same cell in the bitmap.
NormalPage::From(BasePage::FromPayload(start))
->object_start_bitmap()
- .SetBit(start);
+ .SetBit<AccessMode::kAtomic>(start);
}
void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
@@ -78,7 +80,9 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
DCHECK_NOT_NULL(new_buffer);
stats_collector.NotifyAllocation(new_size);
auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
- page->object_start_bitmap().ClearBit(new_buffer);
+ // Concurrent marking may be running while the LAB is set up next to a live
+ // object sharing the same cell in the bitmap.
+ page->object_start_bitmap().ClearBit<AccessMode::kAtomic>(new_buffer);
MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
}
}
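Both call sites above switch the object-start-bitmap update to an atomic access mode because a concurrent marker may touch other bits in the same bitmap word while the allocator sets or clears a bit. A simplified model of such an access-mode-templated bitmap (sizes and layout are illustrative, not cppgc's):

#include <atomic>
#include <cstdint>

enum class AccessMode { kNonAtomic, kAtomic };

struct ObjectStartBitmap {
  std::atomic<uint32_t> words[8]{};

  template <AccessMode mode>
  void SetBit(size_t index) {
    const uint32_t mask = 1u << (index % 32);
    if (mode == AccessMode::kAtomic) {
      // Safe while other threads concurrently touch bits in the same word.
      words[index / 32].fetch_or(mask, std::memory_order_relaxed);
    } else {
      // Plain read-modify-write; only valid without concurrent accesses.
      uint32_t value = words[index / 32].load(std::memory_order_relaxed);
      words[index / 32].store(value | mask, std::memory_order_relaxed);
    }
  }
};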
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index 8a3d6cd97c..37933bbcab 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -9,15 +9,16 @@
#include "include/cppgc/cross-thread-persistent.h"
#include "include/cppgc/persistent.h"
+#include "src/base/platform/platform.h"
#include "src/heap/cppgc/process-heap.h"
namespace cppgc {
namespace internal {
-PersistentRegion::~PersistentRegion() { ClearAllUsedNodes(); }
+PersistentRegionBase::~PersistentRegionBase() { ClearAllUsedNodes(); }
template <typename PersistentBaseClass>
-void PersistentRegion::ClearAllUsedNodes() {
+void PersistentRegionBase::ClearAllUsedNodes() {
for (auto& slots : nodes_) {
for (auto& node : *slots) {
if (!node.IsUsed()) continue;
@@ -35,14 +36,15 @@ void PersistentRegion::ClearAllUsedNodes() {
CPPGC_DCHECK(0u == nodes_in_use_);
}
-template void PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
-template void PersistentRegion::ClearAllUsedNodes<PersistentBase>();
+template void
+PersistentRegionBase::ClearAllUsedNodes<CrossThreadPersistentBase>();
+template void PersistentRegionBase::ClearAllUsedNodes<PersistentBase>();
-void PersistentRegion::ClearAllUsedNodes() {
+void PersistentRegionBase::ClearAllUsedNodes() {
ClearAllUsedNodes<PersistentBase>();
}
-size_t PersistentRegion::NodesInUse() const {
+size_t PersistentRegionBase::NodesInUse() const {
#ifdef DEBUG
const size_t accumulated_nodes_in_use_ = std::accumulate(
nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
@@ -56,7 +58,7 @@ size_t PersistentRegion::NodesInUse() const {
return nodes_in_use_;
}
-void PersistentRegion::EnsureNodeSlots() {
+void PersistentRegionBase::EnsureNodeSlots() {
nodes_.push_back(std::make_unique<PersistentNodeSlots>());
for (auto& node : *nodes_.back()) {
node.InitializeAsFreeNode(free_list_head_);
@@ -64,7 +66,7 @@ void PersistentRegion::EnsureNodeSlots() {
}
}
-void PersistentRegion::Trace(Visitor* visitor) {
+void PersistentRegionBase::Trace(Visitor* visitor) {
free_list_head_ = nullptr;
for (auto& slots : nodes_) {
bool is_empty = true;
@@ -92,6 +94,15 @@ void PersistentRegion::Trace(Visitor* visitor) {
nodes_.end());
}
+PersistentRegion::PersistentRegion()
+ : creation_thread_id_(v8::base::OS::GetCurrentThreadId()) {
+ USE(creation_thread_id_);
+}
+
+void PersistentRegion::CheckIsCreationThread() {
+ DCHECK_EQ(creation_thread_id_, v8::base::OS::GetCurrentThreadId());
+}
+
PersistentRegionLock::PersistentRegionLock() {
g_process_mutex.Pointer()->Lock();
}
@@ -107,24 +118,24 @@ void PersistentRegionLock::AssertLocked() {
CrossThreadPersistentRegion::~CrossThreadPersistentRegion() {
PersistentRegionLock guard;
- PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
+ PersistentRegionBase::ClearAllUsedNodes<CrossThreadPersistentBase>();
nodes_.clear();
- // PersistentRegion destructor will be a noop.
+ // PersistentRegionBase destructor will be a noop.
}
void CrossThreadPersistentRegion::Trace(Visitor* visitor) {
PersistentRegionLock::AssertLocked();
- PersistentRegion::Trace(visitor);
+ PersistentRegionBase::Trace(visitor);
}
size_t CrossThreadPersistentRegion::NodesInUse() const {
// This method does not require a lock.
- return PersistentRegion::NodesInUse();
+ return PersistentRegionBase::NodesInUse();
}
void CrossThreadPersistentRegion::ClearAllUsedNodes() {
PersistentRegionLock::AssertLocked();
- PersistentRegion::ClearAllUsedNodes<CrossThreadPersistentBase>();
+ PersistentRegionBase::ClearAllUsedNodes<CrossThreadPersistentBase>();
}
} // namespace internal
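PersistentRegion now records the thread it was created on so that debug builds can catch cross-thread use of non-cross-thread persistents. A standalone sketch of the same check, using the standard library instead of v8::base::OS:

#include <cassert>
#include <thread>

class RegionWithThreadAffinity {
 public:
  RegionWithThreadAffinity() : creation_thread_id_(std::this_thread::get_id()) {}

  void CheckIsCreationThread() const {
    // Non-cross-thread persistent regions may only be used from the thread
    // that created them; the check disappears in release builds.
    assert(creation_thread_id_ == std::this_thread::get_id());
  }

 private:
  const std::thread::id creation_thread_id_;
};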
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 3c7cb61761..b50f96d70e 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -30,8 +30,8 @@ bool IsOnStack(const void* address) {
} // namespace
-void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
- bool points_to_payload) {
+void SameThreadEnabledCheckingPolicyBase::CheckPointerImpl(
+ const void* ptr, bool points_to_payload, bool check_off_heap_assignments) {
// `ptr` must not reside on stack.
DCHECK(!IsOnStack(ptr));
auto* base_page = BasePage::FromPayload(ptr);
@@ -41,12 +41,14 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
// References cannot change their heap association which means that state is
// immutable once it is set.
+ bool is_on_heap = true;
if (!heap_) {
heap_ = &base_page->heap();
if (!heap_->page_backend()->Lookup(reinterpret_cast<Address>(this))) {
// If `this` is not contained within the heap of `ptr`, we must deal with
// an on-stack or off-heap reference. For both cases there should be no
// heap registered.
+ is_on_heap = false;
CHECK(!HeapRegistry::TryFromManagedPointer(this));
}
}
@@ -54,6 +56,8 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
// Member references should never mix heaps.
DCHECK_EQ(heap_, &base_page->heap());
+ DCHECK_EQ(heap_->GetCreationThreadId(), v8::base::OS::GetCurrentThreadId());
+
// Header checks.
const HeapObjectHeader* header = nullptr;
if (points_to_payload) {
@@ -68,20 +72,24 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
DCHECK(!header->IsFree());
}
-#ifdef CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
- if (heap_->prefinalizer_handler()->IsInvokingPreFinalizers()) {
- // During prefinalizers invocation, check that |ptr| refers to a live object
- // and that it is assigned to a live slot.
- DCHECK(header->IsMarked());
- // Slot can be in a large object.
- const auto* slot_page = BasePage::FromInnerAddress(heap_, this);
- // Off-heap slots (from other heaps or on-stack) are considered live.
- bool slot_is_live =
- !slot_page || slot_page->ObjectHeaderFromInnerAddress(this).IsMarked();
- DCHECK(slot_is_live);
- USE(slot_is_live);
+#ifdef CPPGC_VERIFY_HEAP
+ if (check_off_heap_assignments || is_on_heap) {
+ if (heap_->prefinalizer_handler()->IsInvokingPreFinalizers()) {
+ // Slot can be in a large object.
+ const auto* slot_page = BasePage::FromInnerAddress(heap_, this);
+ // Off-heap slots (from other heaps or on-stack) are considered live.
+ bool slot_is_live =
+ !slot_page ||
+ slot_page->ObjectHeaderFromInnerAddress(this).IsMarked();
+ // During prefinalizers invocation, check that if the slot is live then
+ // |ptr| refers to a live object.
+ DCHECK_IMPLIES(slot_is_live, header->IsMarked());
+ USE(slot_is_live);
+ }
}
-#endif // CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
+#else
+ USE(is_on_heap);
+#endif // CPPGC_VERIFY_HEAP
}
PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(
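The prefinalizer check above is both regrouped under CPPGC_VERIFY_HEAP and weakened: instead of requiring every assigned target to be marked, it only requires that when the slot itself is live. A small sketch of the resulting implication, with plain stand-ins for the header and page lookups:

#include <cassert>

struct SlotInfo {
  bool on_managed_page;  // False for off-heap or on-stack slots.
  bool slot_marked;      // Only meaningful when on_managed_page is true.
};

inline void CheckPrefinalizerAssignment(const SlotInfo& slot,
                                        bool target_marked) {
  // Off-heap slots count as live; on-heap slots are live when marked.
  const bool slot_is_live = !slot.on_managed_page || slot.slot_marked;
  // A live slot must not end up pointing at an object that is about to be
  // swept, i.e. an unmarked target.
  assert(!slot_is_live || target_marked);
  (void)slot_is_live;
  (void)target_marked;
}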
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 482bab1595..26b4498d6b 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -817,7 +817,7 @@ class Sweeper::SweeperImpl final {
MutatorThreadSweeper sweeper(&space_states_, platform_,
config_.free_memory_handling);
{
- StatsCollector::EnabledScope stats_scope(
+ StatsCollector::EnabledScope inner_stats_scope(
stats_collector_, internal_scope_id, "deltaInSeconds",
deadline_in_seconds - platform_->MonotonicallyIncreasingTime());
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 2547d40f0c..576e26507f 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -627,13 +627,13 @@ MaybeHandle<String> FactoryBase<Impl>::NewConsString(
// Copy left part.
{
const uint8_t* src =
- left->template GetChars<uint8_t>(no_gc, access_guard);
+ left->template GetChars<uint8_t>(isolate(), no_gc, access_guard);
CopyChars(dest, src, left_length);
}
// Copy right part.
{
const uint8_t* src =
- right->template GetChars<uint8_t>(no_gc, access_guard);
+ right->template GetChars<uint8_t>(isolate(), no_gc, access_guard);
CopyChars(dest + left_length, src, right_length);
}
return result;
@@ -645,9 +645,10 @@ MaybeHandle<String> FactoryBase<Impl>::NewConsString(
DisallowGarbageCollection no_gc;
SharedStringAccessGuardIfNeeded access_guard(isolate());
base::uc16* sink = result->GetChars(no_gc, access_guard);
- String::WriteToFlat(*left, sink, 0, left->length(), access_guard);
- String::WriteToFlat(*right, sink + left->length(), 0, right->length(),
+ String::WriteToFlat(*left, sink, 0, left->length(), isolate(),
access_guard);
+ String::WriteToFlat(*right, sink + left->length(), 0, right->length(),
+ isolate(), access_guard);
return result;
}
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index e995a49897..ae6c0e27f8 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -1021,14 +1021,14 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
NewRawOneByteString(length).ToHandleChecked();
DisallowGarbageCollection no_gc;
uint8_t* dest = result->GetChars(no_gc);
- String::WriteToFlat(*str, dest, begin, end);
+ String::WriteToFlat(*str, dest, begin, length);
return result;
} else {
Handle<SeqTwoByteString> result =
NewRawTwoByteString(length).ToHandleChecked();
DisallowGarbageCollection no_gc;
base::uc16* dest = result->GetChars(no_gc);
- String::WriteToFlat(*str, dest, begin, end);
+ String::WriteToFlat(*str, dest, begin, length);
return result;
}
}
@@ -1572,14 +1572,17 @@ Handle<WasmArray> Factory::NewWasmArray(
WasmArray result = WasmArray::cast(raw);
result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
result.set_length(length);
- for (uint32_t i = 0; i < length; i++) {
- Address address = result.ElementAddress(i);
- if (type->element_type().is_numeric()) {
+ if (type->element_type().is_numeric()) {
+ for (uint32_t i = 0; i < length; i++) {
+ Address address = result.ElementAddress(i);
elements[i]
.Packed(type->element_type())
.CopyTo(reinterpret_cast<byte*>(address));
- } else {
- base::WriteUnalignedValue<Object>(address, *elements[i].to_ref());
+ }
+ } else {
+ for (uint32_t i = 0; i < length; i++) {
+ int offset = result.element_offset(i);
+ TaggedField<Object>::store(result, offset, *elements[i].to_ref());
}
}
return handle(result, isolate());
@@ -1594,11 +1597,13 @@ Handle<WasmStruct> Factory::NewWasmStruct(const wasm::StructType* type,
WasmStruct result = WasmStruct::cast(raw);
result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
for (uint32_t i = 0; i < type->field_count(); i++) {
- Address address = result.RawFieldAddress(type->field_offset(i));
+ int offset = type->field_offset(i);
if (type->field(i).is_numeric()) {
+ Address address = result.RawFieldAddress(offset);
args[i].Packed(type->field(i)).CopyTo(reinterpret_cast<byte*>(address));
} else {
- base::WriteUnalignedValue<Object>(address, *args[i].to_ref());
+ offset += WasmStruct::kHeaderSize;
+ TaggedField<Object>::store(result, offset, *args[i].to_ref());
}
}
return handle(result, isolate());
@@ -3654,7 +3659,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
}
Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
- Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSizeWithPrototype);
+ Handle<Map> map =
+ NewMap(JS_CLASS_CONSTRUCTOR_TYPE, JSFunction::kSizeWithPrototype);
{
DisallowGarbageCollection no_gc;
Map raw_map = *map;
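The WasmArray/WasmStruct hunks above replace raw full-word writes of reference elements with TaggedField<Object>::store at an object-relative offset. One plausible reading, sketched under that assumption, is that tagged slots hold a compressed 32-bit representation when pointer compression is enabled, so a full-pointer WriteUnalignedValue would store the wrong width; every type below is an illustrative stand-in, not V8's API.

#include <cstdint>
#include <cstring>

using Address = uintptr_t;
using Tagged_t = uint32_t;  // Compressed on-heap representation of a pointer.

// Hypothetical compression: keep the low 32 bits relative to the cage base.
inline Tagged_t CompressTagged(Address cage_base, Address value) {
  return static_cast<Tagged_t>(value - cage_base);
}

// Store a reference field through its compressed representation instead of
// writing the full pointer into the 32-bit slot.
inline void StoreTaggedField(Address object, int offset, Address cage_base,
                             Address value) {
  const Tagged_t compressed = CompressTagged(cage_base, value);
  std::memcpy(reinterpret_cast<void*>(object + offset), &compressed,
              sizeof(compressed));
}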
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index a780ac01b0..8ddd177c6b 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -257,14 +257,14 @@ void GCTracer::Start(GarbageCollector collector,
previous_ = current_;
switch (collector) {
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
break;
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
current_ =
Event(Event::MINOR_MARK_COMPACTOR, gc_reason, collector_reason);
break;
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
if (heap_->incremental_marking()->WasActivated()) {
current_ = Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason,
collector_reason);
@@ -344,10 +344,11 @@ void GCTracer::Stop(GarbageCollector collector) {
}
DCHECK_LE(0, start_counter_);
- DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
- (collector == MINOR_MARK_COMPACTOR &&
+ DCHECK((collector == GarbageCollector::SCAVENGER &&
+ current_.type == Event::SCAVENGER) ||
+ (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
current_.type == Event::MINOR_MARK_COMPACTOR) ||
- (collector == MARK_COMPACTOR &&
+ (collector == GarbageCollector::MARK_COMPACTOR &&
(current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
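The mechanical SCAVENGER to GarbageCollector::SCAVENGER rewrites in this file (and in heap.cc and heap.h below) are what converting an unscoped enum to a scoped enum class looks like at the use sites. The definition itself is outside this diff, so the sketch below only assumes it:

enum class GarbageCollector { SCAVENGER, MINOR_MARK_COMPACTOR, MARK_COMPACTOR };

// Scoped enumerators must be qualified at every use site, which is why each
// switch case and comparison in the hunks above gained the type prefix.
const char* CollectorName(GarbageCollector collector) {
  switch (collector) {
    case GarbageCollector::SCAVENGER:
      return "Scavenger";
    case GarbageCollector::MARK_COMPACTOR:
      return "Mark-Compact";
    case GarbageCollector::MINOR_MARK_COMPACTOR:
      return "Minor Mark-Compact";
  }
  return "Unknown collector";
}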
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index d5bdf513a6..6daeadc94b 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -7,6 +7,7 @@
#include "include/v8-metrics.h"
#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
@@ -31,23 +32,24 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
#define TRACE_GC_CATEGORIES \
"devtools.timeline," TRACE_DISABLED_BY_DEFAULT("v8.gc")
-#define TRACE_GC(tracer, scope_id) \
- GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
- GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id, \
- ThreadKind::kMain); \
- TRACE_EVENT0(TRACE_GC_CATEGORIES, GCTracer::Scope::Name(gc_tracer_scope_id))
-
-#define TRACE_GC1(tracer, scope_id, thread_kind) \
- GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
- GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id, thread_kind); \
- TRACE_EVENT0(TRACE_GC_CATEGORIES, GCTracer::Scope::Name(gc_tracer_scope_id))
-
-#define TRACE_GC_EPOCH(tracer, scope_id, thread_kind) \
- GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
- GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id, thread_kind); \
- CollectionEpoch gc_tracer_epoch = tracer->CurrentEpoch(scope_id); \
- TRACE_EVENT1(TRACE_GC_CATEGORIES, GCTracer::Scope::Name(gc_tracer_scope_id), \
- "epoch", gc_tracer_epoch)
+#define TRACE_GC(tracer, scope_id) \
+ GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
+ tracer, GCTracer::Scope::ScopeId(scope_id), ThreadKind::kMain); \
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, \
+ GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)))
+
+#define TRACE_GC1(tracer, scope_id, thread_kind) \
+ GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
+ tracer, GCTracer::Scope::ScopeId(scope_id), thread_kind); \
+ TRACE_EVENT0(TRACE_GC_CATEGORIES, \
+ GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)))
+
+#define TRACE_GC_EPOCH(tracer, scope_id, thread_kind) \
+ GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
+ tracer, GCTracer::Scope::ScopeId(scope_id), thread_kind); \
+ TRACE_EVENT1(TRACE_GC_CATEGORIES, \
+ GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)), \
+ "epoch", tracer->CurrentEpoch(scope_id))
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
@@ -59,11 +61,11 @@ class V8_EXPORT_PRIVATE GCTracer {
struct IncrementalMarkingInfos {
IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
- void Update(double duration) {
+ void Update(double delta) {
steps++;
- this->duration += duration;
- if (duration > longest_step) {
- longest_step = duration;
+ duration += delta;
+ if (delta > longest_step) {
+ longest_step = delta;
}
}
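The rewritten TRACE_GC* macros no longer declare fixed local names such as gc_tracer_scope_id, which made two macro uses in one block collide; UNIQUE_IDENTIFIER (from the newly included src/base/macros.h) generates a distinct name per expansion. A sketch of how such a macro is commonly built, assuming the usual __COUNTER__ token-pasting approach rather than V8's exact definition:

#define CONCAT_IMPL(a, b) a##b
#define CONCAT(a, b) CONCAT_IMPL(a, b)
// __COUNTER__ is a widespread compiler extension; __LINE__ also works as long
// as each expansion sits on its own line.
#define MY_UNIQUE_IDENTIFIER(base) CONCAT(base, __COUNTER__)

struct ScopedTrace {
  explicit ScopedTrace(const char* name) { (void)name; /* start the scope */ }
  ~ScopedTrace() { /* end the scope */ }
};

#define TRACE_SCOPE(name) ScopedTrace MY_UNIQUE_IDENTIFIER(trace_scope_)(name)

void Example() {
  TRACE_SCOPE("parent");  // Expands to e.g. ScopedTrace trace_scope_0("parent");
  TRACE_SCOPE("child");   // Distinct identifier, so no redefinition or shadowing.
}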
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 7c8a2f54d6..9b998ea6af 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -43,6 +43,7 @@
#include "src/objects/scope-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/visitors-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/strings/string-hasher.h"
#include "src/utils/ostreams.h"
@@ -769,6 +770,9 @@ bool Heap::HasDirtyJSFinalizationRegistries() {
return !dirty_js_finalization_registries_list().IsUndefined(isolate());
}
+VerifyPointersVisitor::VerifyPointersVisitor(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
+
AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
heap_->always_allocate_scope_count_++;
}
@@ -784,12 +788,12 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
: heap_(heap) {
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
- heap_->code_space()->SetReadAndWritable();
+ heap_->code_space()->SetCodeModificationPermissions();
LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
+ page->SetCodeModificationPermissions();
page = page->next_page();
}
}
@@ -847,7 +851,7 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
if (scope_active_) {
DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
(chunk_->owner()->identity() == CODE_LO_SPACE));
- MemoryChunk::cast(chunk_)->SetReadAndWritable();
+ MemoryChunk::cast(chunk_)->SetCodeModificationPermissions();
}
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index fa009bac9f..4a57a1678e 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -31,7 +31,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
#include "src/heap/base/stack.h"
@@ -461,18 +461,18 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
if (space != NEW_SPACE && space != NEW_LO_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
if (FLAG_gc_global || ShouldStressCompaction() || !new_space()) {
*reason = "GC in old space forced by flags";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
if (incremental_marking()->NeedsFinalization() &&
AllocationLimitOvershotByLargeMargin()) {
*reason = "Incremental marking needs finalization";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
if (!CanPromoteYoungAndExpandOldGeneration(0)) {
@@ -480,7 +480,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
*reason = "scavenge might not succeed";
- return MARK_COMPACTOR;
+ return GarbageCollector::MARK_COMPACTOR;
}
// Default
@@ -792,16 +792,16 @@ void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
}
int distance = static_cast<int>(retaining_path.size());
for (auto node : retaining_path) {
- HeapObject object = node.first;
- bool ephemeron = node.second;
+ HeapObject node_object = node.first;
+ bool node_ephemeron = node.second;
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
PrintF("Distance from root %d%s: ", distance,
- ephemeron ? " (ephemeron)" : "");
- object.ShortPrint();
+ node_ephemeron ? " (ephemeron)" : "");
+ node_object.ShortPrint();
PrintF("\n");
#ifdef OBJECT_PRINT
- object.Print();
+ node_object.Print();
PrintF("\n");
#endif
--distance;
@@ -978,10 +978,11 @@ size_t Heap::UsedGlobalHandlesSize() {
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
+ PtrComprCageBase cage_base(isolate());
AllocationSite site;
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
- MapWord map_word = site_and_count.first.map_word(kRelaxedLoad);
+ MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
@@ -1003,7 +1004,6 @@ void Heap::MergeAllocationSitePretenuringFeedback(
void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@@ -1018,7 +1018,6 @@ void Heap::AddAllocationObserversToAllSpaces(
void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- SafepointScope scope(this);
for (SpaceIterator it(this); it.HasNext();) {
Space* space = it.Next();
@@ -1181,8 +1180,8 @@ void Heap::ProcessPretenuringFeedback() {
// Step 2: Pretenure allocation sites for manual requests.
if (allocation_sites_to_pretenure_) {
while (!allocation_sites_to_pretenure_->empty()) {
- auto site = allocation_sites_to_pretenure_->Pop();
- if (PretenureAllocationSiteManually(isolate_, site)) {
+ auto pretenure_site = allocation_sites_to_pretenure_->Pop();
+ if (PretenureAllocationSiteManually(isolate_, pretenure_site)) {
trigger_deoptimization = true;
}
}
@@ -1254,7 +1253,7 @@ void Heap::DeoptMarkedAllocationSites() {
}
void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
memory_pressure_level_.store(MemoryPressureLevel::kNone,
std::memory_order_relaxed);
}
@@ -1686,6 +1685,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
current_gc_flags_ & kForcedGC ||
force_gc_on_next_allocation_;
+ is_current_gc_for_heap_profiler_ =
+ gc_reason == GarbageCollectionReason::kHeapProfiler;
if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
DevToolsTraceEventScope devtools_trace_event_scope(
@@ -1728,7 +1729,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
size_t committed_memory_before = 0;
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
if (cpp_heap()) {
// CppHeap needs a stack marker at the top of all entry points to allow
@@ -1765,8 +1766,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
PROFILE(isolate_, CodeMovingGCEvent());
}
- GCType gc_type = collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact
- : kGCTypeScavenge;
+ GCType gc_type = collector == GarbageCollector::MARK_COMPACTOR
+ ? kGCTypeMarkSweepCompact
+ : kGCTypeScavenge;
{
GCCallbacksScope scope(this);
// Temporary override any embedder stack state as callbacks may create
@@ -1778,7 +1780,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
AllowGarbageCollection allow_gc;
AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
+ VMState<EXTERNAL> callback_state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
@@ -1790,10 +1792,11 @@ bool Heap::CollectGarbage(AllocationSpace space,
freed_global_handles +=
PerformGarbageCollection(collector, gc_callback_flags);
}
- // Clear is_current_gc_forced now that the current GC is complete. Do this
- // before GarbageCollectionEpilogue() since that could trigger another
- // unforced GC.
+ // Clear flags describing the current GC now that the current GC is
+ // complete. Do this before GarbageCollectionEpilogue() since that could
+ // trigger another unforced GC.
is_current_gc_forced_ = false;
+ is_current_gc_for_heap_profiler_ = false;
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
@@ -1814,22 +1817,24 @@ bool Heap::CollectGarbage(AllocationSpace space,
AllowGarbageCollection allow_gc;
AllowJavascriptExecution allow_js(isolate());
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
+ VMState<EXTERNAL> callback_state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
}
- if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
+ if (collector == GarbageCollector::MARK_COMPACTOR ||
+ collector == GarbageCollector::SCAVENGER) {
tracer()->RecordGCPhasesHistograms(gc_type_timer);
}
}
GarbageCollectionEpilogue();
- if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
+ if (collector == GarbageCollector::MARK_COMPACTOR &&
+ FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
}
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
// Calculate used memory first, then committed memory. Following code
// assumes that committed >= used, which might not hold when this is
// calculated in the wrong order and background threads allocate
@@ -1858,7 +1863,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
tracer()->Stop(collector);
}
- if (collector == MARK_COMPACTOR &&
+ if (collector == GarbageCollector::MARK_COMPACTOR &&
(gc_callback_flags & (kGCCallbackFlagForced |
kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
isolate()->CountUsage(v8::Isolate::kForcedGC);
@@ -2150,11 +2155,11 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
namespace {
GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
switch (collector) {
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
return GCTracer::Scope::ScopeId::MARK_COMPACTOR;
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
return GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR;
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
return GCTracer::Scope::ScopeId::SCAVENGER;
}
UNREACHABLE();
@@ -2201,13 +2206,13 @@ size_t Heap::PerformGarbageCollection(
NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
switch (collector) {
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
MarkCompact();
break;
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
Scavenge();
break;
}
@@ -2217,14 +2222,14 @@ size_t Heap::PerformGarbageCollection(
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
- if (collector != MARK_COMPACTOR) {
+ if (collector != GarbageCollector::MARK_COMPACTOR) {
// Objects that died in the new space might have been accounted
// as bytes marked ahead of schedule by the incremental marker.
incremental_marking()->UpdateMarkedBytesAfterScavenge(
start_young_generation_size - SurvivedYoungObjectSize());
}
- if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
+ if (!fast_promotion_mode_ || collector == GarbageCollector::MARK_COMPACTOR) {
ComputeFastPromotionMode();
}
@@ -2245,7 +2250,7 @@ size_t Heap::PerformGarbageCollection(
isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
}
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
// TraceEpilogue may trigger operations that invalidate global handles. It
// has to be called *after* all other operations that potentially touch and
@@ -2284,7 +2289,7 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
base::MutexGuard guard(isolate()->client_isolate_mutex());
const char* collector_reason = nullptr;
- GarbageCollector collector = MARK_COMPACTOR;
+ GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
tracer()->Start(collector, gc_reason, collector_reason);
@@ -2302,7 +2307,7 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
client_heap->shared_map_allocator_->FreeLinearAllocationArea();
});
- PerformGarbageCollection(MARK_COMPACTOR);
+ PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
isolate()->IterateClientIsolates([initiator](Isolate* client) {
GlobalSafepoint::StopMainThread stop_main_thread =
@@ -2359,7 +2364,7 @@ void Heap::UpdateCurrentEpoch(GarbageCollector collector) {
void Heap::UpdateEpochFull() { epoch_full_ = next_epoch(); }
void Heap::RecomputeLimits(GarbageCollector collector) {
- if (!((collector == MARK_COMPACTOR) ||
+ if (!((collector == GarbageCollector::MARK_COMPACTOR) ||
(HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_))) {
return;
@@ -2391,7 +2396,7 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
size_t new_space_capacity = NewSpaceCapacity();
HeapGrowingMode mode = CurrentHeapGrowingMode();
- if (collector == MARK_COMPACTOR) {
+ if (collector == GarbageCollector::MARK_COMPACTOR) {
external_memory_.ResetAfterGC();
set_old_generation_allocation_limit(
@@ -2675,7 +2680,7 @@ void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
guard.emplace(&unprotected_memory_chunks_mutex_);
}
if (unprotected_memory_chunks_.insert(chunk).second) {
- chunk->SetReadAndWritable();
+ chunk->SetCodeModificationPermissions();
}
}
}
@@ -2727,8 +2732,9 @@ void Heap::UpdateExternalString(String string, size_t old_payload,
String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
+ PtrComprCageBase cage_base(heap->isolate());
HeapObject obj = HeapObject::cast(*p);
- MapWord first_word = obj.map_word(kRelaxedLoad);
+ MapWord first_word = obj.map_word(cage_base, kRelaxedLoad);
String new_string;
@@ -2736,9 +2742,9 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
String string = String::cast(obj);
- if (!string.IsExternalString()) {
+ if (!string.IsExternalString(cage_base)) {
// Original external string has been internalized.
- DCHECK(string.IsThinString());
+ DCHECK(string.IsThinString(cage_base));
return String();
}
heap->FinalizeExternalString(string);
@@ -2750,10 +2756,10 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
}
// String is still reachable.
- if (new_string.IsThinString()) {
+ if (new_string.IsThinString(cage_base)) {
// Filtering Thin strings out of the external string table.
return String();
- } else if (new_string.IsExternalString()) {
+ } else if (new_string.IsExternalString(cage_base)) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
@@ -2762,7 +2768,7 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
}
// Internalization can replace external strings with non-external strings.
- return new_string.IsExternalString() ? new_string : String();
+ return new_string.IsExternalString(cage_base) ? new_string : String();
}
void Heap::ExternalStringTable::VerifyYoung() {
@@ -3849,14 +3855,14 @@ class SlotCollectingVisitor final : public ObjectVisitor {
MaybeObjectSlot slot(int i) { return slots_[i]; }
#if V8_EXTERNAL_CODE_SPACE
- ObjectSlot code_slot(int i) { return code_slots_[i]; }
+ CodeObjectSlot code_slot(int i) { return code_slots_[i]; }
int number_of_code_slots() { return static_cast<int>(code_slots_.size()); }
#endif
private:
std::vector<MaybeObjectSlot> slots_;
#if V8_EXTERNAL_CODE_SPACE
- std::vector<ObjectSlot> code_slots_;
+ std::vector<CodeObjectSlot> code_slots_;
#endif
};
@@ -4143,11 +4149,13 @@ void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void Heap::AppendArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension) {
+ // The ArrayBufferSweeper manages all counters and updates the Heap counters.
array_buffer_sweeper_->Append(object, extension);
}
void Heap::DetachArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension) {
+ // The ArrayBufferSweeper manages all counters and updates the Heap counters.
return array_buffer_sweeper_->Detach(object, extension);
}
@@ -4441,11 +4449,11 @@ void Heap::VerifyReadOnlyHeap() {
read_only_space_->Verify(isolate());
}
-class SlotVerifyingVisitor : public ObjectVisitor {
+class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
public:
- SlotVerifyingVisitor(std::set<Address>* untyped,
+ SlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
std::set<std::pair<SlotType, Address>>* typed)
- : untyped_(untyped), typed_(typed) {}
+ : ObjectVisitorWithCageBases(isolate), untyped_(untyped), typed_(typed) {}
virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
@@ -4453,7 +4461,8 @@ class SlotVerifyingVisitor : public ObjectVisitor {
ObjectSlot end) override {
#ifdef DEBUG
for (ObjectSlot slot = start; slot < end; ++slot) {
- DCHECK(!MapWord::IsPacked((*slot).ptr()) || !HasWeakHeapObjectTag(*slot));
+ Object obj = slot.load(cage_base());
+ CHECK(!MapWord::IsPacked(obj.ptr()) || !HasWeakHeapObjectTag(obj));
}
#endif // DEBUG
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
@@ -4462,7 +4471,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
for (MaybeObjectSlot slot = start; slot < end; ++slot) {
- if (ShouldHaveBeenRecorded(host, *slot)) {
+ if (ShouldHaveBeenRecorded(host, slot.load(cage_base()))) {
CHECK_GT(untyped_->count(slot.address()), 0);
}
}
@@ -4470,11 +4479,8 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
if (ShouldHaveBeenRecorded(
- host, MaybeObject::FromObject(slot.load(code_cage_base)))) {
+ host, MaybeObject::FromObject(slot.load(code_cage_base())))) {
CHECK_GT(untyped_->count(slot.address()), 0);
}
}
@@ -4490,7 +4496,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- Object target = rinfo->target_object();
+ Object target = rinfo->target_object_no_host(cage_base());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(
InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
@@ -4519,10 +4525,10 @@ class SlotVerifyingVisitor : public ObjectVisitor {
class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
public:
- OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
+ OldToNewSlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
std::set<std::pair<SlotType, Address>>* typed,
EphemeronRememberedSet* ephemeron_remembered_set)
- : SlotVerifyingVisitor(untyped, typed),
+ : SlotVerifyingVisitor(isolate, untyped, typed),
ephemeron_remembered_set_(ephemeron_remembered_set) {}
bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
@@ -4602,7 +4608,8 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
std::set<std::pair<SlotType, Address>> typed_old_to_new;
if (!InYoungGeneration(object)) {
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
- OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
+ OldToNewSlotVerifyingVisitor visitor(isolate(), &old_to_new,
+ &typed_old_to_new,
&this->ephemeron_remembered_set_);
object.IterateBody(&visitor);
}
@@ -6364,10 +6371,10 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
static constexpr intptr_t kLogicalChunkAlignmentMask =
kLogicalChunkAlignment - 1;
- class MarkingVisitor : public ObjectVisitor, public RootVisitor {
+ class MarkingVisitor : public ObjectVisitorWithCageBases, public RootVisitor {
public:
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
- : filter_(filter) {}
+ : ObjectVisitorWithCageBases(filter->heap_), filter_(filter) {}
void VisitMapPointer(HeapObject object) override {
MarkHeapObject(Map::unchecked_cast(object.map()));
@@ -6384,9 +6391,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- HeapObject code = HeapObject::unchecked_cast(slot.load(code_cage_base));
+ HeapObject code = HeapObject::unchecked_cast(slot.load(code_cage_base()));
MarkHeapObject(code);
}
@@ -6395,7 +6400,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
MarkHeapObject(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
- MarkHeapObject(rinfo->target_object());
+ MarkHeapObject(rinfo->target_object_no_host(cage_base()));
}
void VisitRootPointers(Root root, const char* description,
@@ -6424,9 +6429,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
template <typename TSlot>
V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
// Treat weak references as strong.
- Isolate* isolate = filter_->heap_->isolate();
for (TSlot p = start; p < end; ++p) {
- typename TSlot::TObject object = p.load(isolate);
+ typename TSlot::TObject object = p.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObject(&heap_object)) {
MarkHeapObject(heap_object);
@@ -6865,9 +6869,7 @@ void VerifyPointersVisitor::VisitPointers(HeapObject host,
void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
CodeObjectSlot slot) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object maybe_code = slot.load(code_cage_base);
+ Object maybe_code = slot.load(code_cage_base());
HeapObject code;
if (maybe_code.GetHeapObject(&code)) {
VerifyCodeObjectImpl(code);
@@ -6892,22 +6894,20 @@ void VerifyPointersVisitor::VisitRootPointers(Root root,
void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
CHECK(IsValidHeapObject(heap_, heap_object));
- CHECK(heap_object.map().IsMap());
+ CHECK(heap_object.map(cage_base()).IsMap());
}
void VerifyPointersVisitor::VerifyCodeObjectImpl(HeapObject heap_object) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
CHECK(IsValidCodeObject(heap_, heap_object));
- PtrComprCageBase cage_base(heap_->isolate());
- CHECK(heap_object.map(cage_base).IsMap(cage_base));
- CHECK(heap_object.map(cage_base).instance_type() == CODE_TYPE);
+ CHECK(heap_object.map(cage_base()).IsMap());
+ CHECK(heap_object.map(cage_base()).instance_type() == CODE_TYPE);
}
template <typename TSlot>
void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
- Isolate* isolate = heap_->isolate();
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = slot.load(isolate);
+ typename TSlot::TObject object = slot.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -6935,7 +6935,7 @@ void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
}
void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
- VerifyHeapObjectImpl(rinfo->target_object());
+ VerifyHeapObjectImpl(rinfo->target_object_no_host(cage_base()));
}
void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
@@ -6997,9 +6997,17 @@ void Heap::CreateObjectStats() {
}
Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
- MapWord map_word = object.map_word(kRelaxedLoad);
- return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress().map()
- : map_word.ToMap();
+ PtrComprCageBase cage_base(isolate());
+ MapWord map_word = object.map_word(cage_base, kRelaxedLoad);
+ if (map_word.IsForwardingAddress()) {
+#if V8_EXTERNAL_CODE_SPACE
+ PtrComprCageBase code_cage_base(isolate()->code_cage_base());
+#else
+ PtrComprCageBase code_cage_base = cage_base;
+#endif
+ return map_word.ToForwardingAddress(code_cage_base).map(cage_base);
+ }
+ return map_word.ToMap();
}
Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
@@ -7089,11 +7097,18 @@ void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
DCHECK(ObjectInYoungGeneration(HeapObjectSlot(slot).ToHeapObject()));
- int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
- InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
- auto it =
- ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
- it.first->second.insert(entry.as_int());
+ if (FLAG_minor_mc) {
+ // Minor MC lacks support for specialized generational ephemeron barriers.
+ // The regular write barrier works as well but keeps more memory alive.
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(table);
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
+ } else {
+ int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
+ InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
+ auto it =
+ ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
+ it.first->second.insert(entry.as_int());
+ }
}
void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
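RecordEphemeronKeyWrite now distinguishes the --minor-mc configuration: without a specialized generational ephemeron barrier it falls back to the generic old-to-new remembered set, which is correct but coarser. A standalone sketch of that choice; the container types and names are stand-ins, not V8's remembered sets:

#include <cstdint>
#include <set>
#include <unordered_map>
#include <unordered_set>

struct EphemeronKeyBarrier {
  bool minor_mc_enabled = false;
  std::set<uintptr_t> old_to_new_slots;  // Generic slot-based remembered set.
  std::unordered_map<uintptr_t, std::unordered_set<int>> per_table_entries;

  void RecordKeyWrite(uintptr_t table, uintptr_t slot, int entry_index) {
    if (minor_mc_enabled) {
      // Coarse but always correct; may keep more memory alive.
      old_to_new_slots.insert(slot);
    } else {
      // Precise per-table entry indices, consumed when the ephemeron tables
      // are processed during young-generation collection.
      per_table_entries[table].insert(entry_index);
    }
  }
};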
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 1a4ce1aaef..74aac82907 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -474,24 +474,26 @@ class Heap {
}
static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
- return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
+ return collector == GarbageCollector::SCAVENGER ||
+ collector == GarbageCollector::MINOR_MARK_COMPACTOR;
}
static inline GarbageCollector YoungGenerationCollector() {
#if ENABLE_MINOR_MC
- return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
+ return (FLAG_minor_mc) ? GarbageCollector::MINOR_MARK_COMPACTOR
+ : GarbageCollector::SCAVENGER;
#else
- return SCAVENGER;
+ return GarbageCollector::SCAVENGER;
#endif // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
switch (collector) {
- case SCAVENGER:
+ case GarbageCollector::SCAVENGER:
return "Scavenger";
- case MARK_COMPACTOR:
+ case GarbageCollector::MARK_COMPACTOR:
return "Mark-Compact";
- case MINOR_MARK_COMPACTOR:
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
return "Minor Mark-Compact";
}
return "Unknown collector";
@@ -1463,6 +1465,12 @@ class Heap {
bool is_current_gc_forced() const { return is_current_gc_forced_; }
+ // Returns whether the currently in-progress GC should avoid increasing the
+ // ages on any objects that live for a set number of collections.
+ bool ShouldCurrentGCKeepAgesUnchanged() const {
+ return is_current_gc_forced_ || is_current_gc_for_heap_profiler_;
+ }
+
// Returns the size of objects residing in non-new spaces.
// Excludes external memory held by those objects.
V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();
@@ -2450,6 +2458,7 @@ class Heap {
std::unique_ptr<GlobalSafepoint> safepoint_;
bool is_current_gc_forced_ = false;
+ bool is_current_gc_for_heap_profiler_ = false;
ExternalStringTable external_string_table_;
@@ -2656,9 +2665,10 @@ class V8_NODISCARD CodePageMemoryModificationScope {
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
+class VerifyPointersVisitor : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
- explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
+ V8_INLINE explicit VerifyPointersVisitor(Heap* heap);
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override;
void VisitPointers(HeapObject host, MaybeObjectSlot start,
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 6cc5a4a868..2cac8dc0a5 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -354,6 +354,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ PtrComprCageBase cage_base(isolate);
for (LargePage* chunk = first_page(); chunk != nullptr;
chunk = chunk->next_page()) {
// Each chunk contains an object that starts at the large object page's
@@ -364,23 +365,26 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to be
// in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
+ Map map = object.map(cage_base);
+ CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
- if (!(object.IsAbstractCode() || object.IsSeqString() ||
- object.IsExternalString() || object.IsThinString() ||
- object.IsFixedArray() || object.IsFixedDoubleArray() ||
- object.IsWeakFixedArray() || object.IsWeakArrayList() ||
- object.IsPropertyArray() || object.IsByteArray() ||
- object.IsFeedbackVector() || object.IsBigInt() ||
- object.IsFreeSpace() || object.IsFeedbackMetadata() ||
- object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
- object.IsPreparseData()) &&
+ if (!(object.IsAbstractCode(cage_base) || object.IsSeqString(cage_base) ||
+ object.IsExternalString(cage_base) ||
+ object.IsThinString(cage_base) || object.IsFixedArray(cage_base) ||
+ object.IsFixedDoubleArray(cage_base) ||
+ object.IsWeakFixedArray(cage_base) ||
+ object.IsWeakArrayList(cage_base) ||
+ object.IsPropertyArray(cage_base) || object.IsByteArray(cage_base) ||
+ object.IsFeedbackVector(cage_base) || object.IsBigInt(cage_base) ||
+ object.IsFreeSpace(cage_base) ||
+ object.IsFeedbackMetadata(cage_base) || object.IsContext(cage_base) ||
+ object.IsUncompiledDataWithoutPreparseData(cage_base) ||
+ object.IsPreparseData(cage_base)) &&
!FLAG_young_generation_large_objects) {
FATAL("Found invalid Object (instance_type=%i) in large object space.",
- object.map().instance_type());
+ object.map(cage_base).instance_type());
}
// The object itself should look OK.
@@ -391,27 +395,27 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
}
// Byte arrays and strings don't have interior pointers.
- if (object.IsAbstractCode()) {
+ if (object.IsAbstractCode(cage_base)) {
VerifyPointersVisitor code_visitor(heap());
object.IterateBody(map, object.Size(), &code_visitor);
- } else if (object.IsFixedArray()) {
+ } else if (object.IsFixedArray(cage_base)) {
FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object element = array.get(j);
if (element.IsHeapObject()) {
HeapObject element_object = HeapObject::cast(element);
CHECK(IsValidHeapObject(heap(), element_object));
- CHECK(element_object.map().IsMap());
+ CHECK(element_object.map(cage_base).IsMap(cage_base));
}
}
- } else if (object.IsPropertyArray()) {
+ } else if (object.IsPropertyArray(cage_base)) {
PropertyArray array = PropertyArray::cast(object);
for (int j = 0; j < array.length(); j++) {
Object property = array.get(j);
if (property.IsHeapObject()) {
HeapObject property_object = HeapObject::cast(property);
CHECK(heap()->Contains(property_object));
- CHECK(property_object.map().IsMap());
+ CHECK(property_object.map(cage_base).IsMap(cage_base));
}
}
}
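A pattern that repeats through the verifier and visitor hunks in this diff: instead of recomputing the pointer-compression cage base from an isolate or slot address at every load, visitors now derive from ObjectVisitorWithCageBases and read values cached at construction time. A reduced illustration of that shape (not the actual V8 class):

#include <cstdint>

struct PtrComprCageBase {
  uintptr_t address = 0;
};

class VisitorWithCageBases {
 public:
  explicit VisitorWithCageBases(PtrComprCageBase cage_base,
                                PtrComprCageBase code_cage_base)
      : cage_base_(cage_base), code_cage_base_(code_cage_base) {}

 protected:
  // Every slot load in derived visitors goes through these cached values.
  PtrComprCageBase cage_base() const { return cage_base_; }
  PtrComprCageBase code_cage_base() const { return code_cage_base_; }

 private:
  const PtrComprCageBase cage_base_;
  const PtrComprCageBase code_cage_base_;
};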
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 47865a6cc7..a623360197 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -127,16 +127,6 @@ void MainMarkingVisitor<MarkingState>::RecordRelocSlot(Code host,
MarkCompactCollector::RecordRelocSlot(host, rinfo, target);
}
-template <typename MarkingState>
-void MainMarkingVisitor<MarkingState>::MarkDescriptorArrayFromWriteBarrier(
- DescriptorArray descriptors, int number_of_own_descriptors) {
- // This is necessary because the Scavenger records slots only for the
- // promoted black objects and the marking visitor of DescriptorArray skips
- // the descriptors marked by the visitor.VisitDescriptors() below.
- this->MarkDescriptorArrayBlack(descriptors);
- this->VisitDescriptors(descriptors, number_of_own_descriptors);
-}
-
template <LiveObjectIterationMode mode>
LiveObjectRange<mode>::iterator::iterator(const MemoryChunk* chunk,
Bitmap* bitmap, Address start)
@@ -173,6 +163,7 @@ operator++(int) {
template <LiveObjectIterationMode mode>
void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
+ PtrComprCageBase cage_base(chunk_->heap()->isolate());
while (!it_.Done()) {
HeapObject object;
int size = 0;
@@ -208,10 +199,10 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
- Object map_object = black_object.map(kAcquireLoad);
- CHECK(map_object.IsMap());
+ Object map_object = black_object.map(cage_base, kAcquireLoad);
+ CHECK(map_object.IsMap(cage_base));
map = Map::cast(map_object);
- DCHECK(map.IsMap());
+ DCHECK(map.IsMap(cage_base));
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
@@ -240,10 +231,10 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
object = HeapObject::FromAddress(addr);
- Object map_object = object.map(kAcquireLoad);
- CHECK(map_object.IsMap());
+ Object map_object = object.map(cage_base, kAcquireLoad);
+ CHECK(map_object.IsMap(cage_base));
map = Map::cast(map_object);
- DCHECK(map.IsMap());
+ DCHECK(map.IsMap(cage_base));
size = object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
}
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 83983ae820..3873374b0f 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -52,6 +52,7 @@
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
+#include "src/objects/visitors.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/utils/utils-inl.h"
@@ -75,12 +76,13 @@ STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
#ifdef VERIFY_HEAP
namespace {
-class MarkingVerifier : public ObjectVisitor, public RootVisitor {
+class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
public:
virtual void Run() = 0;
protected:
- explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
+ explicit MarkingVerifier(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) = 0;
@@ -235,10 +237,7 @@ class FullMarkingVerifier : public MarkingVerifier {
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
- Object maybe_code = slot.load(code_cage_base);
+ Object maybe_code = slot.load(code_cage_base());
HeapObject code;
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
@@ -256,9 +255,9 @@ class FullMarkingVerifier : public MarkingVerifier {
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- if (!host.IsWeakObject(rinfo->target_object())) {
- HeapObject object = rinfo->target_object();
- VerifyHeapObjectImpl(object);
+ HeapObject target_object = rinfo->target_object_no_host(cage_base());
+ if (!host.IsWeakObject(target_object)) {
+ VerifyHeapObjectImpl(target_object);
}
}
@@ -273,10 +272,8 @@ class FullMarkingVerifier : public MarkingVerifier {
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
- PtrComprCageBase cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = slot.load(cage_base);
+ typename TSlot::TObject object = slot.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -287,7 +284,8 @@ class FullMarkingVerifier : public MarkingVerifier {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
+class EvacuationVerifier : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
virtual void Run() = 0;
@@ -314,7 +312,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
protected:
- explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
+ explicit EvacuationVerifier(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
inline Heap* heap() { return heap_; }
@@ -396,10 +395,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
- PtrComprCageBase cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = current.load(cage_base);
+ typename TSlot::TObject object = current.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -415,10 +412,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
- Object maybe_code = slot.load(code_cage_base);
+ Object maybe_code = slot.load(code_cage_base());
HeapObject code;
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
@@ -429,7 +423,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VerifyHeapObjectImpl(rinfo->target_object());
+ VerifyHeapObjectImpl(rinfo->target_object_no_host(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
@@ -565,7 +559,7 @@ void MarkCompactCollector::StartMarking() {
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), weak_objects(), heap_,
epoch(), code_flush_mode(), heap_->local_embedder_heap_tracer()->InUse(),
- heap_->is_current_gc_forced());
+ heap_->ShouldCurrentGCKeepAgesUnchanged());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -1019,7 +1013,8 @@ void MarkCompactCollector::Finish() {
void MarkCompactCollector::SweepArrayBufferExtensions() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
- heap_->array_buffer_sweeper()->RequestSweepFull();
+ heap_->array_buffer_sweeper()->RequestSweep(
+ ArrayBufferSweeper::SweepingType::kFull);
}
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
@@ -1065,24 +1060,26 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
// keep alive its embedded pointers (which would otherwise be dropped).
// - Prefix of the string table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
- : public ObjectVisitor {
+ : public ObjectVisitorWithCageBases {
public:
explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
+ : ObjectVisitorWithCageBases(collector->isolate()),
+ collector_(collector) {}
void VisitPointer(HeapObject host, ObjectSlot p) final {
- MarkObject(host, *p);
+ MarkObject(host, p.load(cage_base()));
}
- void VisitMapPointer(HeapObject host) final { MarkObject(host, host.map()); }
+ void VisitMapPointer(HeapObject host) final {
+ MarkObject(host, host.map(cage_base()));
+ }
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
- PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (ObjectSlot p = start; p < end; ++p) {
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
- DCHECK(!HasWeakHeapObjectTag(p.load(cage_base)));
- MarkObject(host, p.load(cage_base));
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
+ MarkObject(host, p.load(cage_base()));
}
}
@@ -1105,7 +1102,7 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkObject(host, target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- MarkObject(host, rinfo->target_object());
+ MarkObject(host, rinfo->target_object_no_host(cage_base()));
}
private:
@@ -1226,22 +1223,24 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-class RecordMigratedSlotVisitor : public ObjectVisitor {
+class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
public:
explicit RecordMigratedSlotVisitor(
MarkCompactCollector* collector,
EphemeronRememberedSet* ephemeron_remembered_set)
- : collector_(collector),
+ : ObjectVisitorWithCageBases(collector->isolate()),
+ collector_(collector),
ephemeron_remembered_set_(ephemeron_remembered_set) {}
inline void VisitPointer(HeapObject host, ObjectSlot p) final {
- DCHECK(!HasWeakHeapObjectTag(*p));
- RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
+ RecordMigratedSlot(host, MaybeObject::FromObject(p.load(cage_base())),
+ p.address());
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
- DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
- RecordMigratedSlot(host, *p, p.address());
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
+ RecordMigratedSlot(host, p.load(cage_base()), p.address());
}
inline void VisitPointers(HeapObject host, ObjectSlot start,
@@ -1264,10 +1263,8 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
// This code is similar to the implementation of VisitPointer() modulo
// new kind of slot.
- DCHECK(!HasWeakHeapObjectTag(*slot));
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object code = slot.load(code_cage_base);
+ DCHECK(!HasWeakHeapObjectTag(slot.load(code_cage_base())));
+ Object code = slot.load(code_cage_base());
RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
}
@@ -1301,7 +1298,8 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- HeapObject object = HeapObject::cast(rinfo->target_object());
+ HeapObject object =
+ HeapObject::cast(rinfo->target_object_no_host(cage_base()));
GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
@@ -1437,9 +1435,10 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
int size, HeapObject* target_object) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && AbortCompactionForTesting(object)) return false;
-#endif // VERIFY_HEAP
+#ifdef DEBUG
+ if (FLAG_stress_compaction && AbortCompactionForTesting(object))
+ return false;
+#endif // DEBUG
AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
AllocationResult allocation = local_allocator_->Allocate(
target_space, size, AllocationOrigin::kGC, alignment);
@@ -1466,7 +1465,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
migration_function_(this, dst, src, size, dest);
}
-#ifdef VERIFY_HEAP
+#ifdef DEBUG
bool AbortCompactionForTesting(HeapObject object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
@@ -1483,7 +1482,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
}
return false;
}
-#endif // VERIFY_HEAP
+#endif // DEBUG
Heap* heap_;
EvacuationAllocator* local_allocator_;
@@ -1716,12 +1715,6 @@ void MarkCompactCollector::RevisitObject(HeapObject obj) {
marking_visitor_->Visit(obj.map(), obj);
}
-void MarkCompactCollector::MarkDescriptorArrayFromWriteBarrier(
- DescriptorArray descriptors, int number_of_own_descriptors) {
- marking_visitor_->MarkDescriptorArrayFromWriteBarrier(
- descriptors, number_of_own_descriptors);
-}
-
bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
int iterations = 0;
int max_iterations = FLAG_ephemeron_fixpoint_iterations;
@@ -2499,10 +2492,10 @@ bool MarkCompactCollector::TransitionArrayNeedsCompaction(
DCHECK_EQ(raw_target.ToSmi(), Smi::uninitialized_deserialization_value());
#ifdef DEBUG
// Targets can only be dead iff this array is fully deserialized.
- for (int i = 0; i < num_transitions; ++i) {
+ for (int j = 0; j < num_transitions; ++j) {
DCHECK_IMPLIES(
- !transitions.GetRawTarget(i).IsSmi(),
- !non_atomic_marking_state()->IsWhite(transitions.GetTarget(i)));
+ !transitions.GetRawTarget(j).IsSmi(),
+ !non_atomic_marking_state()->IsWhite(transitions.GetTarget(j)));
}
#endif
return false;
@@ -2510,8 +2503,8 @@ bool MarkCompactCollector::TransitionArrayNeedsCompaction(
TransitionsAccessor::GetTargetFromRaw(raw_target))) {
#ifdef DEBUG
// Targets can only be dead iff this array is fully deserialized.
- for (int i = 0; i < num_transitions; ++i) {
- DCHECK(!transitions.GetRawTarget(i).IsSmi());
+ for (int j = 0; j < num_transitions; ++j) {
+ DCHECK(!transitions.GetRawTarget(j).IsSmi());
}
#endif
return true;
@@ -2886,8 +2879,10 @@ static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
Page::FromHeapObject(heap_obj)->IsFlagSet(
Page::COMPACTION_WAS_ABORTED));
- typename TSlot::TObject target =
- MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
+ PtrComprCageBase host_cage_base =
+ V8_EXTERNAL_CODE_SPACE_BOOL ? GetPtrComprCageBase(heap_obj) : cage_base;
+ typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
+ map_word.ToForwardingAddress(host_cage_base));
if (access_mode == AccessMode::NON_ATOMIC) {
slot.store(target);
} else {
@@ -2957,50 +2952,50 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
+class PointersUpdatingVisitor : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
- explicit PointersUpdatingVisitor(Heap* heap) : cage_base_(heap->isolate()) {}
+ explicit PointersUpdatingVisitor(Heap* heap)
+ : ObjectVisitorWithCageBases(heap) {}
void VisitPointer(HeapObject host, ObjectSlot p) override {
- UpdateStrongSlotInternal(cage_base_, p);
+ UpdateStrongSlotInternal(cage_base(), p);
}
void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
- UpdateSlotInternal(cage_base_, p);
+ UpdateSlotInternal(cage_base(), p);
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
for (ObjectSlot p = start; p < end; ++p) {
- UpdateStrongSlotInternal(cage_base_, p);
+ UpdateStrongSlotInternal(cage_base(), p);
}
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
for (MaybeObjectSlot p = start; p < end; ++p) {
- UpdateSlotInternal(cage_base_, p);
+ UpdateSlotInternal(cage_base(), p);
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = cage_base_;
- UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base_,
- code_cage_base, slot);
+ UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base(),
+ code_cage_base(), slot);
}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
- UpdateRootSlotInternal(cage_base_, p);
+ UpdateRootSlotInternal(cage_base(), p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
- UpdateRootSlotInternal(cage_base_, p);
+ UpdateRootSlotInternal(cage_base(), p);
}
}
@@ -3008,7 +3003,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
for (OffHeapObjectSlot p = start; p < end; ++p) {
- UpdateRootSlotInternal(cage_base_, p);
+ UpdateRootSlotInternal(cage_base(), p);
}
}
@@ -3047,8 +3042,6 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
PtrComprCageBase cage_base, MaybeObjectSlot slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
}
-
- PtrComprCageBase cage_base_;
};
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -3365,7 +3358,8 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
} else {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
+ collector_->ReportAbortedEvacuationCandidate(failed_object.address(),
+ chunk);
}
}
break;
@@ -3433,27 +3427,24 @@ class PageEvacuationJob : public v8::JobTask {
};
template <class Evacuator, class Collector>
-void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
+size_t MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
Collector* collector,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
- MigrationObserver* migration_observer, const intptr_t live_bytes) {
- // Used for trace summary.
- double compaction_speed = 0;
- if (FLAG_trace_evacuation) {
- compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ MigrationObserver* migration_observer) {
+ base::Optional<ProfilingMigrationObserver> profiling_observer;
+ if (isolate()->LogObjectRelocation()) {
+ profiling_observer.emplace(heap());
}
-
- const bool profiling = isolate()->LogObjectRelocation();
- ProfilingMigrationObserver profiling_observer(heap());
-
- const size_t pages_count = evacuation_items.size();
std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
const int wanted_num_tasks = NumberOfParallelCompactionTasks();
for (int i = 0; i < wanted_num_tasks; i++) {
auto evacuator = std::make_unique<Evacuator>(collector);
- if (profiling) evacuator->AddObserver(&profiling_observer);
- if (migration_observer != nullptr)
+ if (profiling_observer) {
+ evacuator->AddObserver(&profiling_observer.value());
+ }
+ if (migration_observer) {
evacuator->AddObserver(migration_observer);
+ }
evacuators.push_back(std::move(evacuator));
}
V8::GetCurrentPlatform()
@@ -3461,21 +3452,10 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
std::make_unique<PageEvacuationJob>(
isolate(), &evacuators, std::move(evacuation_items)))
->Join();
-
- for (auto& evacuator : evacuators) evacuator->Finalize();
- evacuators.clear();
-
- if (FLAG_trace_evacuation) {
- PrintIsolate(isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
- "wanted_tasks=%d cores=%d live_bytes=%" V8PRIdPTR
- " compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", pages_count,
- wanted_num_tasks,
- V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
- live_bytes, compaction_speed);
+ for (auto& evacuator : evacuators) {
+ evacuator->Finalize();
}
+ return wanted_num_tasks;
}
bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
@@ -3488,6 +3468,26 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
heap()->CanExpandOldGeneration(live_bytes);
}
+namespace {
+
+void TraceEvacuation(Isolate* isolate, size_t pages_count,
+ size_t wanted_num_tasks, size_t live_bytes,
+ size_t aborted_pages) {
+ DCHECK(FLAG_trace_evacuation);
+ PrintIsolate(
+ isolate,
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
+ "wanted_tasks=%zu cores=%d live_bytes=%" V8PRIdPTR
+ " compaction_speed=%.f aborted=%zu\n",
+ isolate->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", pages_count, wanted_num_tasks,
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1, live_bytes,
+ isolate->heap()->tracer()->CompactionSpeedInBytesPerMillisecond(),
+ aborted_pages);
+}
+
+} // namespace
+
void MarkCompactCollector::EvacuatePagesInParallel() {
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
intptr_t live_bytes = 0;
@@ -3545,8 +3545,10 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
"MarkCompactCollector::EvacuatePagesInParallel", "pages",
evacuation_items.size());
- CreateAndExecuteEvacuationTasks<FullEvacuator>(
- this, std::move(evacuation_items), nullptr, live_bytes);
+ const size_t pages_count = evacuation_items.size();
+ const size_t wanted_num_tasks =
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, std::move(evacuation_items), nullptr);
// After evacuation there might still be swept pages that weren't
// added to one of the compaction space but still reside in the
@@ -3555,7 +3557,12 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// in the sweeping or old-to-new remembered set.
sweeper()->MergeOldToNewRememberedSetsForSweptPages();
- PostProcessEvacuationCandidates();
+ const size_t aborted_pages = PostProcessEvacuationCandidates();
+
+ if (FLAG_trace_evacuation) {
+ TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes,
+ aborted_pages);
+ }
}
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
@@ -3936,7 +3943,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
DCHECK_IMPLIES(
- collector == MARK_COMPACTOR,
+ collector == GarbageCollector::MARK_COMPACTOR,
chunk_->SweepingDone() &&
chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>() == nullptr);
@@ -3949,9 +3956,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(
- collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
+ FLAG_always_promote_young_mc,
+ slots == 0);
if (slots == 0) {
chunk_->ReleaseSlotSet<OLD_TO_NEW>();
@@ -3960,7 +3967,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
DCHECK_IMPLIES(
- collector == MARK_COMPACTOR,
+ collector == GarbageCollector::MARK_COMPACTOR,
!chunk_->SweepingDone() &&
(chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>()) ==
nullptr);
@@ -3975,9 +3982,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(
- collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
+ FLAG_always_promote_young_mc,
+ slots == 0);
if (slots == 0) {
chunk_->ReleaseSweepingSlotSet();
@@ -4014,7 +4021,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
(chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>() !=
nullptr)) {
PtrComprCageBase cage_base = heap_->isolate();
- PtrComprCageBase code_cage_base = heap_->isolate();
+#if V8_EXTERNAL_CODE_SPACE
+ PtrComprCageBase code_cage_base(heap_->isolate()->code_cage_base());
+#else
+ PtrComprCageBase code_cage_base = cage_base;
+#endif
RememberedSet<OLD_TO_CODE>::Iterate(
chunk_,
[=](MaybeObjectSlot slot) {
@@ -4079,8 +4090,8 @@ std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
std::unique_ptr<UpdatingItem>
MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return std::make_unique<
- RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>>(
+ return std::make_unique<RememberedSetUpdatingItem<
+ NonAtomicMarkingState, GarbageCollector::MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
@@ -4148,25 +4159,26 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"EphemeronTableUpdatingItem::Process");
+ PtrComprCageBase cage_base(heap_->isolate());
for (auto it = heap_->ephemeron_remembered_set_.begin();
it != heap_->ephemeron_remembered_set_.end();) {
EphemeronHashTable table = it->first;
auto& indices = it->second;
- if (table.map_word(kRelaxedLoad).IsForwardingAddress()) {
+ if (table.map_word(cage_base, kRelaxedLoad).IsForwardingAddress()) {
// The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
// inserts entries for the moved table into ephemeron_remembered_set_.
it = heap_->ephemeron_remembered_set_.erase(it);
continue;
}
- DCHECK(table.map().IsMap());
- DCHECK(table.Object::IsEphemeronHashTable());
+ DCHECK(table.map(cage_base).IsMap(cage_base));
+ DCHECK(table.IsEphemeronHashTable(cage_base));
for (auto iti = indices.begin(); iti != indices.end();) {
// EphemeronHashTable keys must be heap objects.
HeapObjectSlot key_slot(table.RawFieldOfElementAt(
EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
HeapObject key = key_slot.ToHeapObject();
- MapWord map_word = key.map_word(kRelaxedLoad);
+ MapWord map_word = key.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
key = map_word.ToForwardingAddress();
key_slot.StoreHeapObject(key);
@@ -4245,39 +4257,37 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
- HeapObject failed_object, MemoryChunk* chunk) {
+ Address failed_start, MemoryChunk* chunk) {
base::MutexGuard guard(&mutex_);
aborted_evacuation_candidates_.push_back(
- std::make_pair(failed_object, static_cast<Page*>(chunk)));
+ std::make_pair(failed_start, static_cast<Page*>(chunk)));
}
-void MarkCompactCollector::PostProcessEvacuationCandidates() {
+size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
aborted_evacuation_candidates_.empty());
- for (auto object_and_page : aborted_evacuation_candidates_) {
- HeapObject failed_object = object_and_page.first;
- Page* page = object_and_page.second;
+ for (auto start_and_page : aborted_evacuation_candidates_) {
+ Address failed_start = start_and_page.first;
+ Page* page = start_and_page.second;
page->SetFlag(Page::COMPACTION_WAS_ABORTED);
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place.
// Remove outdated slots.
- RememberedSetSweeping::RemoveRange(page, page->address(),
- failed_object.address(),
+ RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
- failed_object.address(),
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
- failed_object.address());
+ failed_start);
// Remove invalidated slots.
- if (failed_object.address() > page->area_start()) {
+ if (failed_start > page->area_start()) {
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::OldToNew(page);
- old_to_new_cleanup.Free(page->area_start(), failed_object.address());
+ old_to_new_cleanup.Free(page->area_start(), failed_start);
}
// Recompute live bytes.
@@ -4305,10 +4315,7 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
}
}
DCHECK_EQ(aborted_pages_verified, aborted_pages);
- if (FLAG_trace_evacuation && (aborted_pages > 0)) {
- PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
- isolate()->time_millis_since_init(), aborted_pages);
- }
+ return aborted_pages;
}
void MarkCompactCollector::ReleaseEvacuationCandidates() {
@@ -4493,10 +4500,8 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
template <typename TSlot>
void VerifyPointersImpl(TSlot start, TSlot end) {
- PtrComprCageBase cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(start.address());
for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = current.load(cage_base);
+ typename TSlot::TObject object = current.load(cage_base());
HeapObject heap_object;
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
@@ -4512,10 +4517,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base =
- GetPtrComprCageBaseFromOnHeapAddress(slot.address());
- Code code = Code::unchecked_cast(slot.load(code_cage_base));
+ Code code = Code::unchecked_cast(slot.load(code_cage_base()));
VerifyHeapObjectImpl(code);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
@@ -4523,7 +4525,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VerifyHeapObjectImpl(rinfo->target_object());
+ VerifyHeapObjectImpl(rinfo->target_object_no_host(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
@@ -4545,9 +4547,11 @@ class YoungGenerationMarkingVisitor final
: public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
public:
YoungGenerationMarkingVisitor(
- MinorMarkCompactCollector::MarkingState* marking_state,
+ Isolate* isolate, MinorMarkCompactCollector::MarkingState* marking_state,
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
+ : NewSpaceVisitor(isolate),
+ worklist_(global_worklist, task_id),
+ marking_state_(marking_state) {}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
@@ -4631,7 +4635,7 @@ MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
main_marking_visitor_(new YoungGenerationMarkingVisitor(
- marking_state(), worklist_, kMainMarker)),
+ heap->isolate(), marking_state(), worklist_, kMainMarker)),
page_parallel_job_semaphore_(0) {
static_assert(
kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
@@ -4654,7 +4658,8 @@ void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
}
void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
- heap_->array_buffer_sweeper()->RequestSweepYoung();
+ heap_->array_buffer_sweeper()->RequestSweep(
+ ArrayBufferSweeper::SweepingType::kYoung);
}
class YoungGenerationMigrationObserver final : public MigrationObserver {
@@ -4809,6 +4814,9 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
};
void MinorMarkCompactCollector::CollectGarbage() {
+ // Minor MC does not support processing the ephemeron remembered set.
+ DCHECK(heap()->ephemeron_remembered_set_.empty());
+
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
@@ -5027,8 +5035,8 @@ MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
std::unique_ptr<UpdatingItem>
MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return std::make_unique<
- RememberedSetUpdatingItem<NonAtomicMarkingState, MINOR_MARK_COMPACTOR>>(
+ return std::make_unique<RememberedSetUpdatingItem<
+ NonAtomicMarkingState, GarbageCollector::MINOR_MARK_COMPACTOR>>(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
@@ -5043,7 +5051,7 @@ class YoungGenerationMarkingTask {
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
: marking_worklist_(global_worklist, task_id),
marking_state_(collector->marking_state()),
- visitor_(marking_state_, global_worklist, task_id) {
+ visitor_(isolate, marking_state_, global_worklist, task_id) {
local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
Page::kPageSize);
}
@@ -5534,8 +5542,14 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
- CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, std::move(evacuation_items), &observer, live_bytes);
+ const auto pages_count = evacuation_items.size();
+ const auto wanted_num_tasks =
+ CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
+ this, std::move(evacuation_items), &observer);
+
+ if (FLAG_trace_evacuation) {
+ TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
+ }
}
#endif // ENABLE_MINOR_MC
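The recurring change in the mark-compact.cc hunks above is that the verifiers and visitors now derive from ObjectVisitorWithCageBases and read slots through the cached cage_base()/code_cage_base() accessors instead of recomputing a cage base from every slot address. A minimal standalone sketch of that caching idea follows; CageBase, Slot and CachingVisitor are hypothetical stand-ins, not V8 classes.

#include <cstdint>
#include <vector>

// Hypothetical stand-ins for PtrComprCageBase and a compressed slot.
struct CageBase { std::uintptr_t base = 0; };
struct Slot {
  std::uint32_t compressed = 0;
  std::uintptr_t Load(CageBase cage) const { return cage.base + compressed; }
};

class CachingVisitor {
 public:
  explicit CachingVisitor(CageBase cage) : cage_(cage) {}  // computed once
  std::uintptr_t VisitPointers(const std::vector<Slot>& slots) const {
    std::uintptr_t last = 0;
    for (const Slot& slot : slots) {
      last = slot.Load(cage_);  // no per-slot base recomputation
    }
    return last;
  }
 private:
  const CageBase cage_;
};

int main() {
  CachingVisitor visitor(CageBase{0x1000});
  return static_cast<int>(visitor.VisitPointers({{0x10}, {0x20}}) & 0xff);
}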
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 8be25e0914..5a7a450e38 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -219,11 +219,12 @@ class MarkCompactCollectorBase {
virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
+ // Returns the number of wanted compaction tasks.
template <class Evacuator, class Collector>
- void CreateAndExecuteEvacuationTasks(
+ size_t CreateAndExecuteEvacuationTasks(
Collector* collector,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
- MigrationObserver* migration_observer, const intptr_t live_bytes);
+ MigrationObserver* migration_observer);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);
@@ -377,11 +378,12 @@ class MainMarkingVisitor final
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool embedder_tracing_enabled, bool is_forced_gc)
+ bool embedder_tracing_enabled,
+ bool should_keep_ages_unchanged)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
kMainThreadTask, local_marking_worklists, weak_objects, heap,
mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
- is_forced_gc),
+ should_keep_ages_unchanged),
marking_state_(marking_state),
revisiting_object_(false) {}
@@ -391,9 +393,6 @@ class MainMarkingVisitor final
V8_UNLIKELY(revisiting_object_);
}
- void MarkDescriptorArrayFromWriteBarrier(DescriptorArray descriptors,
- int number_of_own_descriptors);
-
private:
// Functions required by MarkingVisitorBase.
@@ -582,10 +581,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VisitObject(HeapObject obj);
// Used by incremental marking for black-allocated objects.
void RevisitObject(HeapObject obj);
- // Ensures that all descriptors int range [0, number_of_own_descripts)
- // are visited.
- void MarkDescriptorArrayFromWriteBarrier(DescriptorArray array,
- int number_of_own_descriptors);
// Drains the main thread marking worklist until the specified number of
// bytes are processed. If the number of bytes is zero, then the worklist
@@ -723,8 +718,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
void ReleaseEvacuationCandidates();
- void PostProcessEvacuationCandidates();
- void ReportAbortedEvacuationCandidate(HeapObject failed_object,
+ // Returns the number of aborted pages.
+ size_t PostProcessEvacuationCandidates();
+ void ReportAbortedEvacuationCandidate(Address failed_start,
MemoryChunk* chunk);
static const int kEphemeronChunkSize = 8 * KB;
@@ -778,7 +774,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Pages that are actually processed during evacuation.
std::vector<Page*> old_space_evacuation_pages_;
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<std::pair<HeapObject, Page*>> aborted_evacuation_candidates_;
+ std::vector<std::pair<Address, Page*>> aborted_evacuation_candidates_;
Sweeper* sweeper_;
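For context on the mark-compact.h signature changes, here is a small sketch of the refactored control flow: the task-spawning helper returns the wanted task count, builds the profiling observer only when it will actually be attached, and leaves the summary tracing to the caller. The types and names below are illustrative, not the V8 API.

#include <cstdio>
#include <memory>
#include <optional>
#include <vector>

struct MigrationObserver { const char* name; };

struct Evacuator {
  std::vector<MigrationObserver*> observers;
  void AddObserver(MigrationObserver* o) { observers.push_back(o); }
  void Finalize() {}
};

size_t CreateAndExecuteEvacuationTasks(bool profiling,
                                       MigrationObserver* migration_observer,
                                       size_t wanted_num_tasks) {
  std::optional<MigrationObserver> profiling_observer;
  if (profiling) profiling_observer.emplace(MigrationObserver{"profiling"});
  std::vector<std::unique_ptr<Evacuator>> evacuators;
  for (size_t i = 0; i < wanted_num_tasks; i++) {
    auto evacuator = std::make_unique<Evacuator>();
    if (profiling_observer) evacuator->AddObserver(&profiling_observer.value());
    if (migration_observer) evacuator->AddObserver(migration_observer);
    evacuators.push_back(std::move(evacuator));
  }
  // ... run the page evacuation job and join it here ...
  for (auto& evacuator : evacuators) evacuator->Finalize();
  return wanted_num_tasks;
}

int main() {
  const bool trace = true;  // stands in for FLAG_trace_evacuation
  size_t wanted = CreateAndExecuteEvacuationTasks(true, nullptr, 4);
  if (trace) std::printf("evacuation-summary: wanted_tasks=%zu\n", wanted);
}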
diff --git a/deps/v8/src/heap/marking-barrier-inl.h b/deps/v8/src/heap/marking-barrier-inl.h
index d03bdcb0f7..03e89a68e4 100644
--- a/deps/v8/src/heap/marking-barrier-inl.h
+++ b/deps/v8/src/heap/marking-barrier-inl.h
@@ -40,6 +40,21 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
return true;
}
+template <typename TSlot>
+inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
+ auto* isolate = heap_->isolate();
+ for (TSlot slot = start; slot < end; ++slot) {
+ typename TSlot::TObject object = slot.Relaxed_Load();
+ HeapObject heap_object;
+ // Mark both weak and strong edges.
+ if (object.GetHeapObject(isolate, &heap_object)) {
+ if (MarkValue(host, heap_object) && is_compacting_) {
+ collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
+ }
+ }
+ }
+}
+
bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
if (marking_state_.WhiteToGrey(obj)) {
worklist_.Push(obj);
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 06f2e67810..51b4605756 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -4,6 +4,7 @@
#include "src/heap/marking-barrier.h"
+#include "src/base/logging.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
@@ -15,6 +16,7 @@
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/safepoint.h"
+#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer.h"
namespace v8 {
@@ -74,12 +76,30 @@ void MarkingBarrier::Write(JSArrayBuffer host,
void MarkingBarrier::Write(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
DCHECK(IsCurrentMarkingBarrier());
- DCHECK(is_main_thread_barrier_);
- int16_t raw_marked = descriptor_array.raw_number_of_marked_descriptors();
- if (NumberOfMarkedDescriptors::decode(collector_->epoch(), raw_marked) <
- number_of_own_descriptors) {
- collector_->MarkDescriptorArrayFromWriteBarrier(descriptor_array,
- number_of_own_descriptors);
+ DCHECK(IsReadOnlyHeapObject(descriptor_array.map()));
+ // The DescriptorArray needs to be marked black here to ensure that slots are
+ // recorded by the Scavenger in case the DescriptorArray is promoted while
+ // incremental marking is running. This is needed as the regular marking
+ // visitor does not re-process any already marked descriptors. If we don't
+ // mark it black here, the Scavenger may promote a DescriptorArray and any
+ // already marked descriptors will not have any slots recorded.
+ if (!marking_state_.IsBlack(descriptor_array)) {
+ marking_state_.WhiteToGrey(descriptor_array);
+ marking_state_.GreyToBlack(descriptor_array);
+ MarkRange(descriptor_array, descriptor_array.GetFirstPointerSlot(),
+ descriptor_array.GetDescriptorSlot(0));
+ }
+ const int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
+ collector_->epoch(), number_of_own_descriptors);
+ if (old_marked < number_of_own_descriptors) {
+ // This marks the range [old_marked, number_of_own_descriptors) instead
+ // of registering weak slots, which could temporarily keep more objects alive
+ // for the current GC cycle. Weakness is not needed for actual trimming, see
+ // `MarkCompactCollector::TrimDescriptorArray()`.
+ MarkRange(descriptor_array,
+ MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
+ MaybeObjectSlot(descriptor_array.GetDescriptorSlot(
+ number_of_own_descriptors)));
}
}
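A self-contained sketch of the core idea in the rewritten DescriptorArray barrier: atomically advance a "marked up to" counter and visit only the range that was not covered before, so repeated write barriers never re-mark old descriptors. This is illustrative only; the real counter additionally encodes the marking epoch, and the real MarkRange pushes objects onto the marking worklist.

#include <atomic>
#include <cstdio>

std::atomic<int> marked_up_to{0};  // stands in for the marked-descriptor count

void MarkRange(int from, int to) {
  for (int i = from; i < to; ++i) std::printf("mark descriptor %d\n", i);
}

void DescriptorArrayWriteBarrier(int number_of_own_descriptors) {
  int old_marked = marked_up_to.load(std::memory_order_relaxed);
  while (old_marked < number_of_own_descriptors &&
         !marked_up_to.compare_exchange_weak(old_marked,
                                             number_of_own_descriptors)) {
    // compare_exchange_weak reloads old_marked on failure; retry or give up.
  }
  if (old_marked < number_of_own_descriptors) {
    MarkRange(old_marked, number_of_own_descriptors);  // only the new tail
  }
}

int main() {
  DescriptorArrayWriteBarrier(3);  // marks [0, 3)
  DescriptorArrayWriteBarrier(5);  // marks only [3, 5)
  DescriptorArrayWriteBarrier(4);  // already covered, marks nothing
}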
diff --git a/deps/v8/src/heap/marking-barrier.h b/deps/v8/src/heap/marking-barrier.h
index 9ed1ee6382..a8e084b699 100644
--- a/deps/v8/src/heap/marking-barrier.h
+++ b/deps/v8/src/heap/marking-barrier.h
@@ -55,6 +55,9 @@ class MarkingBarrier {
bool IsCurrentMarkingBarrier();
+ template <typename TSlot>
+ inline void MarkRange(HeapObject value, TSlot start, TSlot end);
+
Heap* heap_;
MarkCompactCollector* collector_;
IncrementalMarking* incremental_marking_;
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index 39d446aa3a..28fe88d9d1 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -21,6 +21,15 @@ namespace internal {
// ===========================================================================
template <typename ConcreteVisitor, typename MarkingState>
+void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitMapPointer(
+ HeapObject host) {
+ // Note that we are skipping recording the slot because map objects
+ // can't move, so this is safe (see ProcessStrongHeapObject for comparison).
+ MarkObject(host, HeapObject::cast(
+ host.map(ObjectVisitorWithCageBases::cage_base())));
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
DCHECK(ReadOnlyHeap::Contains(object) || heap_->Contains(object));
@@ -76,7 +85,8 @@ MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitPointersImpl(
HeapObject host, TSlot start, TSlot end) {
using THeapObjectSlot = typename TSlot::THeapObjectSlot;
for (TSlot slot = start; slot < end; ++slot) {
- typename TSlot::TObject object = slot.Relaxed_Load();
+ typename TSlot::TObject object =
+ slot.Relaxed_Load(ObjectVisitorWithCageBases::cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
@@ -94,9 +104,8 @@ V8_INLINE void
MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodePointerImpl(
HeapObject host, CodeObjectSlot slot) {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- Object object = slot.Relaxed_Load(code_cage_base);
+ Object object =
+ slot.Relaxed_Load(ObjectVisitorWithCageBases::code_cage_base());
HeapObject heap_object;
if (object.GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
@@ -110,7 +119,8 @@ template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
Code host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- HeapObject object = rinfo->target_object();
+ HeapObject object =
+ rinfo->target_object_no_host(ObjectVisitorWithCageBases::cage_base());
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
weak_objects_->weak_objects_in_code.Push(task_id_,
@@ -142,7 +152,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
this->VisitMapPointer(object);
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
- if (!is_forced_gc_) {
+ if (!should_keep_ages_unchanged_) {
object.MakeOlder();
}
return size;
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 6a016a143e..fdacf8cbaf 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -106,15 +106,17 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
WeakObjects* weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
- bool is_embedder_tracing_enabled, bool is_forced_gc)
- : local_marking_worklists_(local_marking_worklists),
+ bool is_embedder_tracing_enabled,
+ bool should_keep_ages_unchanged)
+ : HeapVisitor<int, ConcreteVisitor>(heap),
+ local_marking_worklists_(local_marking_worklists),
weak_objects_(weak_objects),
heap_(heap),
task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
- is_forced_gc_(is_forced_gc),
+ should_keep_ages_unchanged_(should_keep_ages_unchanged),
is_shared_heap_(heap->IsShared()) {}
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
@@ -134,11 +136,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE int VisitWeakCell(Map map, WeakCell object);
// ObjectVisitor overrides.
- void VisitMapPointer(HeapObject host) final {
- // Note that we are skipping the recording the slot because map objects
- // can't move, so this is safe (see ProcessStrongHeapObject for comparison)
- MarkObject(host, HeapObject::cast(host.map()));
- }
+ V8_INLINE void VisitMapPointer(HeapObject host) final;
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
@@ -208,7 +206,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
const unsigned mark_compact_epoch_;
const base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_embedder_tracing_enabled_;
- const bool is_forced_gc_;
+ const bool should_keep_ages_unchanged_;
const bool is_shared_heap_;
};
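The is_forced_gc_ to should_keep_ages_unchanged_ rename in this header and in marking-visitor-inl.h boils down to the gate sketched below: bytecode is aged, and thus eventually becomes flushable, only when the current GC is allowed to change ages. BytecodeArray here is a hypothetical stand-in.

#include <cstdio>

struct BytecodeArray {
  int age = 0;
  void MakeOlder() { ++age; }
};

int VisitBytecodeArray(BytecodeArray& object, bool should_keep_ages_unchanged) {
  // ... visit the map pointer and the body here ...
  if (!should_keep_ages_unchanged) {
    object.MakeOlder();  // moves it closer to being flushed
  }
  return static_cast<int>(sizeof(object));
}

int main() {
  BytecodeArray bytecode;
  VisitBytecodeArray(bytecode, /*should_keep_ages_unchanged=*/true);
  VisitBytecodeArray(bytecode, /*should_keep_ages_unchanged=*/false);
  std::printf("age=%d\n", bytecode.age);  // 1: only the second visit aged it
}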
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 29dbf74934..959501724f 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -76,7 +76,7 @@ void MemoryChunk::SetReadAndExecutable() {
PageAllocator::kReadExecute);
}
-void MemoryChunk::SetReadAndWritable() {
+void MemoryChunk::SetCodeModificationPermissions() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
@@ -100,6 +100,14 @@ void MemoryChunk::SetReadAndWritable() {
}
}
+void MemoryChunk::SetDefaultCodePermissions() {
+ if (FLAG_jitless) {
+ SetReadable();
+ } else {
+ SetReadAndExecutable();
+ }
+}
+
namespace {
PageAllocator::Permission DefaultWritableCodePermissions() {
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index ad9ac72f83..761ea9a83a 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -191,15 +191,9 @@ class MemoryChunk : public BasicMemoryChunk {
V8_EXPORT_PRIVATE void SetReadable();
V8_EXPORT_PRIVATE void SetReadAndExecutable();
- V8_EXPORT_PRIVATE void SetReadAndWritable();
-
- void SetDefaultCodePermissions() {
- if (FLAG_jitless) {
- SetReadable();
- } else {
- SetReadAndExecutable();
- }
- }
+
+ V8_EXPORT_PRIVATE void SetCodeModificationPermissions();
+ V8_EXPORT_PRIVATE void SetDefaultCodePermissions();
heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
const heap::ListNode<MemoryChunk>& list_node() const { return list_node_; }
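The memory-chunk change renames SetReadAndWritable to SetCodeModificationPermissions and moves SetDefaultCodePermissions out of line. A tiny sketch of the intended call sequence around code patching, with stand-in types and a plain bool in place of FLAG_jitless:

#include <cstdio>

enum class Permission { kRead, kReadExecute, kReadWrite };

struct CodeChunk {
  bool jitless = false;  // stands in for FLAG_jitless
  Permission perm = Permission::kReadExecute;
  void SetReadable() { perm = Permission::kRead; }
  void SetReadAndExecutable() { perm = Permission::kReadExecute; }
  void SetCodeModificationPermissions() { perm = Permission::kReadWrite; }
  void SetDefaultCodePermissions() {
    if (jitless) {
      SetReadable();
    } else {
      SetReadAndExecutable();
    }
  }
};

int main() {
  CodeChunk chunk;
  chunk.SetCodeModificationPermissions();  // open the page for patching
  // ... write or relocate code here ...
  chunk.SetDefaultCodePermissions();  // back to R-X, or R-- when jitless
  std::printf("perm=%d\n", static_cast<int>(chunk.perm));
}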
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index 87cfb06faf..0ef5d7550b 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -7,6 +7,7 @@
#include "include/v8-local-handle.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate-inl.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/factory-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/marking-worklist.h"
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 6dcd0a51a0..4c00e8154a 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -145,7 +145,7 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
DescriptorArray descriptors = map.instance_descriptors();
for (InternalIndex descriptor : map.IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(descriptor);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
// Stop on first out-of-object field.
if (!index.is_inobject()) break;
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index bef24bb1d5..715b83b9ac 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -19,6 +19,7 @@
#include "src/objects/ordered-hash-table.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/objects/torque-defined-classes.h"
+#include "src/objects/visitors.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
@@ -28,6 +29,19 @@ namespace v8 {
namespace internal {
template <typename ResultType, typename ConcreteVisitor>
+HeapVisitor<ResultType, ConcreteVisitor>::HeapVisitor(
+ PtrComprCageBase cage_base, PtrComprCageBase code_cage_base)
+ : ObjectVisitorWithCageBases(cage_base, code_cage_base) {}
+
+template <typename ResultType, typename ConcreteVisitor>
+HeapVisitor<ResultType, ConcreteVisitor>::HeapVisitor(Isolate* isolate)
+ : ObjectVisitorWithCageBases(isolate) {}
+
+template <typename ResultType, typename ConcreteVisitor>
+HeapVisitor<ResultType, ConcreteVisitor>::HeapVisitor(Heap* heap)
+ : ObjectVisitorWithCageBases(heap) {}
+
+template <typename ResultType, typename ConcreteVisitor>
template <typename T>
T HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject object) {
return T::cast(object);
@@ -35,7 +49,7 @@ T HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject object) {
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject object) {
- return Visit(object.map(), object);
+ return Visit(object.map(cage_base()), object);
}
template <typename ResultType, typename ConcreteVisitor>
@@ -173,6 +187,10 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
}
template <typename ConcreteVisitor>
+NewSpaceVisitor<ConcreteVisitor>::NewSpaceVisitor(Isolate* isolate)
+ : HeapVisitor<int, ConcreteVisitor>(isolate) {}
+
+template <typename ConcreteVisitor>
int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map map,
NativeContext object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 43578ba806..7babd44fb4 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -9,7 +9,6 @@
#include "src/objects/map.h"
#include "src/objects/objects.h"
#include "src/objects/visitors.h"
-#include "torque-generated/field-offsets.h"
namespace v8 {
namespace internal {
@@ -78,8 +77,13 @@ TORQUE_VISITOR_ID_LIST(FORWARD_DECLARE)
// ...
// }
template <typename ResultType, typename ConcreteVisitor>
-class HeapVisitor : public ObjectVisitor {
+class HeapVisitor : public ObjectVisitorWithCageBases {
public:
+ inline HeapVisitor(PtrComprCageBase cage_base,
+ PtrComprCageBase code_cage_base);
+ inline explicit HeapVisitor(Isolate* isolate);
+ inline explicit HeapVisitor(Heap* heap);
+
V8_INLINE ResultType Visit(HeapObject object);
V8_INLINE ResultType Visit(Map map, HeapObject object);
// A callback for visiting the map pointer in the object header.
@@ -115,6 +119,8 @@ class HeapVisitor : public ObjectVisitor {
template <typename ConcreteVisitor>
class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
public:
+ V8_INLINE NewSpaceVisitor(Isolate* isolate);
+
V8_INLINE bool ShouldVisitMapPointer() { return false; }
// Special cases for young generation.
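Because HeapVisitor and NewSpaceVisitor now store cage bases, every concrete visitor has to pass an Isolate or Heap up to its base constructor, which is why ScavengeVisitor and YoungGenerationMarkingVisitor gained constructor arguments elsewhere in this patch. A stripped-down sketch of that forwarding, with placeholder types:

// Placeholder for the context the visitor bases now require (Isolate/Heap).
struct IsolateLike {};

template <typename ConcreteVisitor>
class HeapVisitorLike {
 public:
  explicit HeapVisitorLike(IsolateLike* isolate) : isolate_(isolate) {}
 protected:
  IsolateLike* isolate_;
};

template <typename ConcreteVisitor>
class NewSpaceVisitorLike : public HeapVisitorLike<ConcreteVisitor> {
 public:
  explicit NewSpaceVisitorLike(IsolateLike* isolate)
      : HeapVisitorLike<ConcreteVisitor>(isolate) {}
};

class ScavengeVisitorLike : public NewSpaceVisitorLike<ScavengeVisitorLike> {
 public:
  explicit ScavengeVisitorLike(IsolateLike* isolate)
      : NewSpaceVisitorLike<ScavengeVisitorLike>(isolate) {}  // forward upward
};

int main() {
  IsolateLike isolate;
  ScavengeVisitorLike visitor(&isolate);
  (void)visitor;
}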
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 021cf940d7..baac9d5412 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -365,6 +365,7 @@ void PagedSpace::DecreaseLimit(Address new_limit) {
optional_scope.emplace(chunk);
}
+ ConcurrentAllocationMutex guard(this);
SetTopAndLimit(top(), new_limit);
Free(new_limit, old_limit - new_limit,
SpaceAccountingMode::kSpaceAccounted);
@@ -498,11 +499,11 @@ void PagedSpace::SetReadAndExecutable() {
}
}
-void PagedSpace::SetReadAndWritable() {
+void PagedSpace::SetCodeModificationPermissions() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
+ page->SetCodeModificationPermissions();
}
}
@@ -570,8 +571,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
DCHECK(identity() == OLD_SPACE || identity() == MAP_SPACE);
DCHECK_EQ(origin, AllocationOrigin::kRuntime);
- auto result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ base::Optional<std::pair<Address, size_t>> result =
+ TryAllocationFromFreeListBackground(local_heap, min_size_in_bytes,
+ max_size_in_bytes, alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -582,7 +584,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
RefillFreeList();
// Retry the free list allocation.
- auto result = TryAllocationFromFreeListBackground(
+ result = TryAllocationFromFreeListBackground(
local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
@@ -600,7 +602,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
RefillFreeList();
if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
- auto result = TryAllocationFromFreeListBackground(
+ result = TryAllocationFromFreeListBackground(
local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
}
@@ -608,7 +610,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
- auto result = ExpandBackground(local_heap, max_size_in_bytes);
+ result = ExpandBackground(local_heap, max_size_in_bytes);
if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
return result;
@@ -686,6 +688,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ PtrComprCageBase cage_base(isolate);
for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
@@ -727,10 +730,11 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
- if (object.IsExternalString()) {
+ if (object.IsExternalString(cage_base)) {
ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
+ size_t payload_size = external_string.ExternalPayloadSize();
+ external_page_bytes[ExternalBackingStoreType::kExternalString] +=
+ payload_size;
}
}
for (int i = 0; i < kNumTypes; i++) {
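The RawRefillLabBackground hunks above replace the repeated `auto result = ...` declarations with assignments to the single outer result. The sketch below shows why avoiding the shadowing is worthwhile even though every inner path returned immediately: once a later edit drops an early return, the function silently falls back to the stale outer value. Names are illustrative.

#include <cstdio>
#include <optional>

std::optional<int> TryAllocationFromFreeList(bool succeed) {
  return succeed ? std::optional<int>(42) : std::nullopt;
}

std::optional<int> RefillWithShadowing() {
  std::optional<int> result = TryAllocationFromFreeList(false);
  if (!result) {
    auto result = TryAllocationFromFreeList(true);  // shadows the outer result
    if (result) return result;  // drop this line and the outer result stays empty
  }
  return result;
}

std::optional<int> RefillReusingOuter() {
  std::optional<int> result = TryAllocationFromFreeList(false);
  if (!result) {
    result = TryAllocationFromFreeList(true);  // assign, as the patch now does
    if (result) return result;
  }
  return result;
}

int main() {
  std::printf("shadowing=%d reuse=%d\n",
              static_cast<int>(RefillWithShadowing().has_value()),
              static_cast<int>(RefillReusingOuter().has_value()));
}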
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index d502b226c4..b5f9f0e391 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -217,7 +217,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void SetReadable();
void SetReadAndExecutable();
- void SetReadAndWritable();
+ void SetCodeModificationPermissions();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index e67c9743f8..2d79292bbc 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -23,6 +23,10 @@ GlobalSafepoint::GlobalSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
+ // Safepoints need to be initiated on the main thread.
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ DCHECK_NULL(LocalHeap::Current());
+
if (++active_safepoint_scopes_ > 1) return;
TimedHistogramScope timer(
@@ -32,7 +36,6 @@ void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
local_heaps_mutex_.Lock();
barrier_.Arm();
- DCHECK_NULL(LocalHeap::Current());
int running = 0;
@@ -66,11 +69,13 @@ void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
}
void GlobalSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
+ // Safepoints need to be initiated on the main thread.
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ DCHECK_NULL(LocalHeap::Current());
+
DCHECK_GT(active_safepoint_scopes_, 0);
if (--active_safepoint_scopes_ > 0) return;
- DCHECK_NULL(LocalHeap::Current());
-
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 5eea1afafe..7c05527e8c 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -341,10 +341,10 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
kReleaseStore);
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
- Map map = first_word.ToMap();
- SlotCallbackResult result =
- EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
- Map::ObjectFieldsFrom(map.visitor_id()));
+ Map first_map = first_word.ToMap();
+ SlotCallbackResult result = EvacuateObjectDefault(
+ first_map, slot, first, first.SizeFromMap(first_map),
+ Map::ObjectFieldsFrom(first_map.visitor_id()));
object.set_map_word(MapWord::FromForwardingAddress(slot.ToHeapObject()),
kReleaseStore);
return result;
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index f697e83105..3372033efd 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -490,7 +490,8 @@ void ScavengerCollector::IterateStackAndScavenge(
}
void ScavengerCollector::SweepArrayBufferExtensions() {
- heap_->array_buffer_sweeper()->RequestSweepYoung();
+ heap_->array_buffer_sweeper()->RequestSweep(
+ ArrayBufferSweeper::SweepingType::kYoung);
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
@@ -779,7 +780,8 @@ RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
: scavenger_(scavenger) {}
ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
- : scavenger_(scavenger) {}
+ : NewSpaceVisitor<ScavengeVisitor>(scavenger->heap()->isolate()),
+ scavenger_(scavenger) {}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 31e8c92258..b3034dff8b 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -640,7 +640,7 @@ void Heap::CreateApiObjects() {
}
void Heap::CreateInitialObjects() {
- HandleScope scope(isolate());
+ HandleScope initial_objects_handle_scope(isolate());
Factory* factory = isolate()->factory();
ReadOnlyRoots roots(this);
@@ -736,7 +736,7 @@ void Heap::CreateInitialObjects() {
set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
{
- HandleScope scope(isolate());
+ HandleScope handle_scope(isolate());
#define SYMBOL_INIT(_, name) \
{ \
Handle<Symbol> symbol( \
@@ -748,7 +748,7 @@ void Heap::CreateInitialObjects() {
}
{
- HandleScope scope(isolate());
+ HandleScope handle_scope(isolate());
#define SYMBOL_INIT(_, name, description) \
Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
Handle<String> name##d = factory->InternalizeUtf8String(#description); \
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 7d2d680456..7e18fc2895 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -366,6 +366,7 @@ int Sweeper::RawSweep(
// Iterate over the page using the live objects and free the memory before
// the given live object.
Address free_start = p->area_start();
+ PtrComprCageBase cage_base(heap_->isolate());
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject const object = object_and_size.first;
@@ -383,8 +384,8 @@ int Sweeper::RawSweep(
free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
&old_to_new_cleanup);
}
- Map map = object.map(kAcquireLoad);
- DCHECK(map.IsMap());
+ Map map = object.map(cage_base, kAcquireLoad);
+ DCHECK(map.IsMap(cage_base));
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
diff --git a/deps/v8/src/heap/weak-object-worklists.cc b/deps/v8/src/heap/weak-object-worklists.cc
index 8a36c3aef8..50e268ab91 100644
--- a/deps/v8/src/heap/weak-object-worklists.cc
+++ b/deps/v8/src/heap/weak-object-worklists.cc
@@ -25,11 +25,13 @@ void WeakObjects::UpdateAfterScavenge() {
#undef INVOKE_UPDATE
}
+// static
void WeakObjects::UpdateTransitionArrays(
WeakObjectWorklist<TransitionArray>& transition_arrays) {
DCHECK(!ContainsYoungObjects(transition_arrays));
}
+// static
void WeakObjects::UpdateEphemeronHashTables(
WeakObjectWorklist<EphemeronHashTable>& ephemeron_hash_tables) {
ephemeron_hash_tables.Update(
@@ -61,21 +63,25 @@ bool EphemeronUpdater(Ephemeron slot_in, Ephemeron* slot_out) {
}
} // anonymous namespace
+// static
void WeakObjects::UpdateCurrentEphemerons(
WeakObjectWorklist<Ephemeron>& current_ephemerons) {
current_ephemerons.Update(EphemeronUpdater);
}
+// static
void WeakObjects::UpdateNextEphemerons(
WeakObjectWorklist<Ephemeron>& next_ephemerons) {
next_ephemerons.Update(EphemeronUpdater);
}
+// static
void WeakObjects::UpdateDiscoveredEphemerons(
WeakObjectWorklist<Ephemeron>& discovered_ephemerons) {
discovered_ephemerons.Update(EphemeronUpdater);
}
+// static
void WeakObjects::UpdateWeakReferences(
WeakObjectWorklist<HeapObjectAndSlot>& weak_references) {
weak_references.Update(
@@ -96,6 +102,7 @@ void WeakObjects::UpdateWeakReferences(
});
}
+// static
void WeakObjects::UpdateWeakObjectsInCode(
WeakObjectWorklist<HeapObjectAndCode>& weak_objects_in_code) {
weak_objects_in_code.Update(
@@ -113,6 +120,7 @@ void WeakObjects::UpdateWeakObjectsInCode(
});
}
+// static
void WeakObjects::UpdateJSWeakRefs(
WeakObjectWorklist<JSWeakRef>& js_weak_refs) {
js_weak_refs.Update(
@@ -128,16 +136,19 @@ void WeakObjects::UpdateJSWeakRefs(
});
}
+// static
void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
// TODO(syg, marja): Support WeakCells in the young generation.
DCHECK(!ContainsYoungObjects(weak_cells));
}
+// static
void WeakObjects::UpdateCodeFlushingCandidates(
WeakObjectWorklist<SharedFunctionInfo>& code_flushing_candidates) {
DCHECK(!ContainsYoungObjects(code_flushing_candidates));
}
+// static
void WeakObjects::UpdateFlushedJSFunctions(
WeakObjectWorklist<JSFunction>& flushed_js_functions) {
flushed_js_functions.Update(
@@ -153,6 +164,7 @@ void WeakObjects::UpdateFlushedJSFunctions(
});
}
+// static
void WeakObjects::UpdateBaselineFlushingCandidates(
WeakObjectWorklist<JSFunction>& baseline_flush_candidates) {
baseline_flush_candidates.Update(
@@ -169,6 +181,7 @@ void WeakObjects::UpdateBaselineFlushingCandidates(
}
#ifdef DEBUG
+// static
template <typename Type>
bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
bool result = false;
diff --git a/deps/v8/src/heap/weak-object-worklists.h b/deps/v8/src/heap/weak-object-worklists.h
index 60e698e0a7..c61b15a0e9 100644
--- a/deps/v8/src/heap/weak-object-worklists.h
+++ b/deps/v8/src/heap/weak-object-worklists.h
@@ -74,13 +74,13 @@ class WeakObjects {
private:
#define DECLARE_UPDATE_METHODS(Type, _, Name) \
- void Update##Name(WeakObjectWorklist<Type>&);
+ static void Update##Name(WeakObjectWorklist<Type>&);
WEAK_OBJECT_WORKLISTS(DECLARE_UPDATE_METHODS)
#undef DECLARE_UPDATE_METHODS
#ifdef DEBUG
template <typename Type>
- bool ContainsYoungObjects(WeakObjectWorklist<Type>& worklist);
+ static bool ContainsYoungObjects(WeakObjectWorklist<Type>& worklist);
#endif
};
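
The worklist update methods no longer touch instance state, so the header now declares them static through the DECLARE_UPDATE_METHODS macro and each out-of-line definition gains a `// static` marker. A compilable sketch of the same macro/static pattern, with invented names rather than the real worklist types:

#include <vector>

template <typename T>
using Worklist = std::vector<T>;  // stand-in for WeakObjectWorklist<T>

class WeakThings {
 public:
#define DECLARE_UPDATE_METHODS(Type, Name) \
  static void Update##Name(Worklist<Type>&);
  DECLARE_UPDATE_METHODS(int, Ints)
#undef DECLARE_UPDATE_METHODS
};

// static
void WeakThings::UpdateInts(Worklist<int>& ints) {
  ints.clear();  // illustrative body only
}

int main() {
  Worklist<int> w{1, 2, 3};
  WeakThings::UpdateInts(w);  // callable without an instance
  return static_cast<int>(w.size());
}
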
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index f27e3b7f59..505849b9b6 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -28,6 +28,11 @@ namespace internal {
//////////////////// Private helpers.
+#define LOAD_KIND(kind) \
+ IntPtrConstant(static_cast<intptr_t>(LoadHandler::Kind::kind))
+#define STORE_KIND(kind) \
+ Int32Constant(static_cast<intptr_t>(StoreHandler::Kind::kind))
+
// Loads dataX field from the DataHandler object.
TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
TNode<DataHandler> handler, int data_index) {
@@ -35,7 +40,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
TNode<Map> handler_map = LoadMap(handler);
TNode<Uint16T> instance_type = LoadMapInstanceType(handler_map);
#endif
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(InstanceTypeEqual(instance_type, LOAD_HANDLER_TYPE),
InstanceTypeEqual(instance_type, STORE_HANDLER_TYPE)));
int offset = 0;
@@ -57,7 +62,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
UNREACHABLE();
}
USE(minimum_size);
- CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
+ CSA_DCHECK(this, UintPtrGreaterThanOrEqual(
LoadMapInstanceSizeInWords(handler_map),
IntPtrConstant(minimum_size / kTaggedSize)));
return LoadMaybeWeakObjectField(handler, offset);
@@ -106,7 +111,7 @@ void AccessorAssembler::HandlePolymorphicCase(
// Load the {feedback} array length.
TNode<IntPtrT> length = LoadAndUntagWeakFixedArrayLength(feedback);
- CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(kEntrySize), length));
+ CSA_DCHECK(this, IntPtrLessThanOrEqual(IntPtrConstant(kEntrySize), length));
// This is a hand-crafted loop that iterates backwards and only compares
// against zero at the end, since we already know that we will have at least a
@@ -118,7 +123,7 @@ void AccessorAssembler::HandlePolymorphicCase(
{
TNode<MaybeObject> maybe_cached_map =
LoadWeakFixedArrayElement(feedback, var_index.value());
- CSA_ASSERT(this, IsWeakOrCleared(maybe_cached_map));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_cached_map));
GotoIfNot(IsWeakReferenceTo(maybe_cached_map, lookup_start_object_map),
&loop_next);
@@ -150,7 +155,7 @@ void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
LoadMapBitField(lookup_start_object_map)),
miss);
- CSA_ASSERT(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
+ CSA_DCHECK(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
MegaDOMSymbolConstant()));
// In some cases, we load the
@@ -160,7 +165,7 @@ void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
} else {
TNode<MaybeObject> maybe_handler =
LoadFeedbackVectorSlot(CAST(vector), slot, kTaggedSize);
- CSA_ASSERT(this, IsStrong(maybe_handler));
+ CSA_DCHECK(this, IsStrong(maybe_handler));
handler = CAST(maybe_handler);
}
@@ -169,13 +174,13 @@ void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
// Load the getter
TNode<MaybeObject> maybe_getter = LoadMegaDomHandlerAccessor(handler);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_getter));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_getter));
TNode<FunctionTemplateInfo> getter =
CAST(GetHeapObjectAssumeWeak(maybe_getter, miss));
// Load the accessor context
TNode<MaybeObject> maybe_context = LoadMegaDomHandlerContext(handler);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_context));
TNode<Context> context = CAST(GetHeapObjectAssumeWeak(maybe_context, miss));
// TODO(gsathya): This builtin throws an exception on interface check fail but
@@ -255,7 +260,7 @@ void AccessorAssembler::HandleLoadAccessor(
[=] { return LoadHandlerDataField(handler, 3); },
[=] { return LoadHandlerDataField(handler, 2); });
- CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_context));
CSA_CHECK(this, IsNotCleared(maybe_context));
TNode<HeapObject> context = GetHeapObjectAssumeWeak(maybe_context);
@@ -267,13 +272,10 @@ void AccessorAssembler::HandleLoadAccessor(
TVARIABLE(HeapObject, api_holder, CAST(p->lookup_start_object()));
Label load(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &load);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetter)), &load);
- CSA_ASSERT(
- this,
- WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
+ CSA_DCHECK(this,
+ WordEqual(handler_kind, LOAD_KIND(kApiGetterHolderIsPrototype)));
api_holder = LoadMapPrototype(LoadMap(CAST(p->lookup_start_object())));
Goto(&load);
@@ -475,25 +477,21 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
if_oob(this, Label::kDeferred), try_string_to_array_index(this),
emit_element_load(this);
TVARIABLE(IntPtrT, var_intptr_index);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kElement)),
- &if_element);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kElement)), &if_element);
if (access_mode == LoadAccessMode::kHas) {
- CSA_ASSERT(this,
- WordNotEqual(handler_kind,
- IntPtrConstant(LoadHandler::kIndexedString)));
+ CSA_DCHECK(this, WordNotEqual(handler_kind, LOAD_KIND(kIndexedString)));
Goto(&if_property);
} else {
- Branch(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kIndexedString)),
- &if_indexed_string, &if_property);
+ Branch(WordEqual(handler_kind, LOAD_KIND(kIndexedString)),
+ &if_indexed_string, &if_property);
}
BIND(&if_element);
{
Comment("element_load");
// TODO(ishell): implement
- CSA_ASSERT(this, IsClearWord<LoadHandler::IsWasmArrayBits>(handler_word));
+ CSA_DCHECK(this, IsClearWord<LoadHandler::IsWasmArrayBits>(handler_word));
TVARIABLE(Int32T, var_instance_type);
TNode<IntPtrT> intptr_index = TryToIntptr(
p->name(), &try_string_to_array_index, &var_instance_type);
@@ -510,7 +508,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
CallCFunction(function, MachineType::Int32(),
std::make_pair(MachineType::AnyTagged(), p->name())));
GotoIf(Word32Equal(Int32Constant(-1), result), miss);
- CSA_ASSERT(this, Int32GreaterThanOrEqual(result, Int32Constant(0)));
+ CSA_DCHECK(this, Int32GreaterThanOrEqual(result, Int32Constant(0)));
var_intptr_index = ChangeInt32ToIntPtr(result);
Goto(&emit_element_load);
@@ -588,19 +586,19 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
if (access_mode != LoadAccessMode::kHas) {
BIND(&if_indexed_string);
{
- Label if_oob(this, Label::kDeferred);
+ Label if_oob_string(this, Label::kDeferred);
Comment("indexed string");
TNode<String> string_holder = CAST(holder);
TNode<UintPtrT> index = Unsigned(TryToIntptr(p->name(), miss));
TNode<UintPtrT> length =
Unsigned(LoadStringLengthAsWord(string_holder));
- GotoIf(UintPtrGreaterThanOrEqual(index, length), &if_oob);
+ GotoIf(UintPtrGreaterThanOrEqual(index, length), &if_oob_string);
TNode<Int32T> code = StringCharCodeAt(string_holder, index);
TNode<String> result = StringFromSingleCharCode(code);
Return(result);
- BIND(&if_oob);
+ BIND(&if_oob_string);
TNode<BoolT> allow_out_of_bounds =
IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
GotoIfNot(allow_out_of_bounds, miss);
@@ -637,41 +635,32 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
native_data_property(this, Label::kDeferred),
api_getter(this, Label::kDeferred);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &field);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kField)), &field);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kConstantFromPrototype)),
- &constant);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kConstantFromPrototype)), &constant);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNonExistent)),
- &nonexistent);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNonExistent)), &nonexistent);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNormal)),
- &normal);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNormal)), &normal);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kAccessor)),
- &accessor);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kAccessor)), &accessor);
- GotoIf(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNativeDataProperty)),
- &native_data_property);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNativeDataProperty)),
+ &native_data_property);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &api_getter);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetter)), &api_getter);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)),
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetterHolderIsPrototype)),
&api_getter);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)),
- &global);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kGlobal)), &global);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kSlow)), &slow);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kProxy)), &proxy);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kProxy)), &proxy);
- Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kModuleExport)),
- &module_export, &interceptor);
+ Branch(WordEqual(handler_kind, LOAD_KIND(kModuleExport)), &module_export,
+ &interceptor);
BIND(&field);
{
@@ -680,7 +669,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
GotoIf(IsSetWord<LoadHandler::IsWasmStructBits>(handler_word),
&is_wasm_field);
#else
- CSA_ASSERT(this, IsClearWord<LoadHandler::IsWasmStructBits>(handler_word));
+ CSA_DCHECK(this, IsClearWord<LoadHandler::IsWasmStructBits>(handler_word));
#endif // V8_ENABLE_WEBASSEMBLY
HandleLoadField(CAST(holder), handler_word, var_double_value, rebox_double,
@@ -740,7 +729,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
CAST(LoadDescriptorValue(LoadMap(CAST(holder)), descriptor));
TNode<Object> getter =
LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(getter)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(getter)));
exit_point->Return(Call(p->context(), getter, p->receiver()));
}
@@ -764,7 +753,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
// handling with proxies which is currently not supported by builtins. So
// for such cases, we should install a slow path and never reach here. Fix
// it to not generate this for LoadGlobals.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordNotEqual(IntPtrConstant(static_cast<int>(on_nonexistent)),
IntPtrConstant(static_cast<int>(
OnNonExistent::kThrowReferenceError))));
@@ -807,7 +796,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&global);
{
- CSA_ASSERT(this, IsPropertyCell(CAST(holder)));
+ CSA_DCHECK(this, IsPropertyCell(CAST(holder)));
// Ensure the property cell doesn't contain the hole.
TNode<Object> value =
LoadObjectField(CAST(holder), PropertyCell::kValueOffset);
@@ -876,37 +865,27 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
Label return_true(this), return_false(this), return_lookup(this),
normal(this), global(this), slow(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kField)), &return_true);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kConstantFromPrototype)),
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kConstantFromPrototype)),
&return_true);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNonExistent)),
- &return_false);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNonExistent)), &return_false);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNormal)),
- &normal);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNormal)), &normal);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kAccessor)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kAccessor)), &return_true);
- GotoIf(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kNativeDataProperty)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kNativeDataProperty)), &return_true);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &return_true);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetter)), &return_true);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)),
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kApiGetterHolderIsPrototype)),
&return_true);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kSlow)), &slow);
+ GotoIf(WordEqual(handler_kind, LOAD_KIND(kSlow)), &slow);
- Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kGlobal)), &global,
- &return_lookup);
+ Branch(WordEqual(handler_kind, LOAD_KIND(kGlobal)), &global, &return_lookup);
BIND(&return_true);
exit_point->Return(TrueConstant());
@@ -916,14 +895,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
BIND(&return_lookup);
{
- CSA_ASSERT(
+ CSA_DCHECK(
this,
- Word32Or(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kInterceptor)),
- Word32Or(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kProxy)),
- WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kModuleExport)))));
+ Word32Or(WordEqual(handler_kind, LOAD_KIND(kInterceptor)),
+ Word32Or(WordEqual(handler_kind, LOAD_KIND(kProxy)),
+ WordEqual(handler_kind, LOAD_KIND(kModuleExport)))));
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtin::kHasProperty), p->context(),
p->receiver(), p->name());
@@ -945,7 +921,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
BIND(&global);
{
- CSA_ASSERT(this, IsPropertyCell(CAST(holder)));
+ CSA_DCHECK(this, IsPropertyCell(CAST(holder)));
// Ensure the property cell doesn't contain the hole.
TNode<Object> value =
LoadObjectField(CAST(holder), PropertyCell::kValueOffset);
@@ -1025,7 +1001,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
int mask = ICHandler::LookupOnLookupStartObjectBits::kMask |
ICHandler::DoAccessCheckOnLookupStartObjectBits::kMask;
if (ic_mode == ICMode::kGlobalIC) {
- CSA_ASSERT(this, IsClearWord(handler_flags, mask));
+ CSA_DCHECK(this, IsClearWord(handler_flags, mask));
} else {
DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
@@ -1033,7 +1009,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
if_lookup_on_lookup_start_object(this);
GotoIf(IsClearWord(handler_flags, mask), &done);
// Only one of the bits can be set at a time.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
WordNotEqual(WordAnd(handler_flags, IntPtrConstant(mask)),
IntPtrConstant(mask)));
Branch(
@@ -1044,7 +1020,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
BIND(&if_do_access_check);
{
TNode<MaybeObject> data2 = LoadHandlerDataField(handler, 2);
- CSA_ASSERT(this, IsWeakOrCleared(data2));
+ CSA_DCHECK(this, IsWeakOrCleared(data2));
TNode<Context> expected_native_context =
CAST(GetHeapObjectAssumeWeak(data2, miss));
EmitAccessCheck(expected_native_context, p->context(),
@@ -1058,7 +1034,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
// lookup_start_object can be a JSGlobalObject) because prototype
// validity cell check already guards modifications of the global
// object.
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32BinaryNot(HasInstanceType(
CAST(p->lookup_start_object()), JS_GLOBAL_OBJECT_TYPE)));
@@ -1123,11 +1099,9 @@ void AccessorAssembler::HandleLoadICProtoHandler(
{
// If the "maybe_holder_or_constant" in the handler is a smi, then it's
// guaranteed that it's not a holder object, but a constant value.
- CSA_ASSERT(
- this,
- WordEqual(
- Signed(DecodeWord<LoadHandler::KindBits>(SmiUntag(smi_handler))),
- IntPtrConstant(LoadHandler::kConstantFromPrototype)));
+ CSA_DCHECK(this, WordEqual(Signed(DecodeWord<LoadHandler::KindBits>(
+ SmiUntag(smi_handler))),
+ LOAD_KIND(kConstantFromPrototype)));
if (access_mode == LoadAccessMode::kHas) {
exit_point->Return(TrueConstant());
} else {
@@ -1141,7 +1115,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
// the validity cell check implies that |holder| is
// alive. However, for global object receivers, |maybe_holder| may
// be cleared.
- CSA_ASSERT(this, IsWeakOrCleared(maybe_holder_or_constant));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_holder_or_constant));
TNode<HeapObject> holder =
GetHeapObjectAssumeWeak(maybe_holder_or_constant, miss);
*var_holder = holder;
@@ -1159,7 +1133,7 @@ void AccessorAssembler::EmitAccessCheck(TNode<Context> expected_native_context,
TNode<Context> context,
TNode<Object> receiver,
Label* can_access, Label* miss) {
- CSA_ASSERT(this, IsNativeContext(expected_native_context));
+ CSA_DCHECK(this, IsNativeContext(expected_native_context));
TNode<NativeContext> native_context = LoadNativeContext(context);
GotoIf(TaggedEqual(expected_native_context, native_context), can_access);
@@ -1181,7 +1155,7 @@ void AccessorAssembler::JumpIfDataProperty(TNode<Uint32T> details,
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
readonly);
} else {
- CSA_ASSERT(this, IsNotSetWord32(details,
+ CSA_DCHECK(this, IsNotSetWord32(details,
PropertyDetails::kAttributesReadOnlyMask));
}
TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
@@ -1221,25 +1195,24 @@ void AccessorAssembler::HandleStoreICHandlerCase(
Label if_fast_smi(this), if_proxy(this), if_interceptor(this),
if_slow(this);
- STATIC_ASSERT(StoreHandler::kGlobalProxy + 1 == StoreHandler::kNormal);
- STATIC_ASSERT(StoreHandler::kNormal + 1 == StoreHandler::kInterceptor);
- STATIC_ASSERT(StoreHandler::kInterceptor + 1 == StoreHandler::kSlow);
- STATIC_ASSERT(StoreHandler::kSlow + 1 == StoreHandler::kProxy);
- STATIC_ASSERT(StoreHandler::kProxy + 1 == StoreHandler::kKindsNumber);
+#define ASSERT_CONSECUTIVE(a, b) \
+ STATIC_ASSERT(static_cast<intptr_t>(StoreHandler::Kind::a) + 1 == \
+ static_cast<intptr_t>(StoreHandler::Kind::b));
+ ASSERT_CONSECUTIVE(kGlobalProxy, kNormal)
+ ASSERT_CONSECUTIVE(kNormal, kInterceptor)
+ ASSERT_CONSECUTIVE(kInterceptor, kSlow)
+ ASSERT_CONSECUTIVE(kSlow, kProxy)
+ ASSERT_CONSECUTIVE(kProxy, kKindsNumber)
+#undef ASSERT_CONSECUTIVE
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- GotoIf(
- Int32LessThan(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)),
- &if_fast_smi);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy)),
- &if_proxy);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kInterceptor)),
+ GotoIf(Int32LessThan(handler_kind, STORE_KIND(kGlobalProxy)), &if_fast_smi);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kProxy)), &if_proxy);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kInterceptor)),
&if_interceptor);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)),
- &if_slow);
- CSA_ASSERT(this,
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)));
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kSlow)), &if_slow);
+ CSA_DCHECK(this, Word32Equal(handler_kind, STORE_KIND(kNormal)));
TNode<PropertyDictionary> properties =
CAST(LoadSlowProperties(CAST(holder)));
@@ -1282,14 +1255,9 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_fast_smi);
{
- TNode<Uint32T> handler_kind =
- DecodeWord32<StoreHandler::KindBits>(handler_word);
-
Label data(this), accessor(this), native_data_property(this);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)),
- &accessor);
- Branch(Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kNativeDataProperty)),
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kAccessor)), &accessor);
+ Branch(Word32Equal(handler_kind, STORE_KIND(kNativeDataProperty)),
&native_data_property, &data);
BIND(&accessor);
@@ -1356,7 +1324,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&store_transition_or_global);
{
// Load value or miss if the {handler} weak cell is cleared.
- CSA_ASSERT(this, IsWeakOrCleared(handler));
+ CSA_DCHECK(this, IsWeakOrCleared(handler));
TNode<HeapObject> map_or_property_cell =
GetHeapObjectAssumeWeak(handler, miss);
@@ -1391,13 +1359,13 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
}
TNode<Uint32T> bitfield3 = LoadMapBitField3(transition_map);
- CSA_ASSERT(this, IsClearWord32<Map::Bits3::IsDictionaryMapBit>(bitfield3));
+ CSA_DCHECK(this, IsClearWord32<Map::Bits3::IsDictionaryMapBit>(bitfield3));
GotoIf(IsSetWord32<Map::Bits3::IsDeprecatedBit>(bitfield3), miss);
// Load last descriptor details.
TNode<UintPtrT> nof =
DecodeWordFromWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bitfield3);
- CSA_ASSERT(this, WordNotEqual(nof, IntPtrConstant(0)));
+ CSA_DCHECK(this, WordNotEqual(nof, IntPtrConstant(0)));
TNode<DescriptorArray> descriptors = LoadMapDescriptors(transition_map);
TNode<IntPtrT> factor = IntPtrConstant(DescriptorArray::kEntrySize);
@@ -1407,7 +1375,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
TNode<Name> key = LoadKeyByKeyIndex(descriptors, last_key_index);
GotoIf(TaggedNotEqual(key, p->name()), miss);
} else {
- CSA_ASSERT(this, TaggedEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
+ CSA_DCHECK(this, TaggedEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
p->name()));
}
TNode<Uint32T> details = LoadDetailsByKeyIndex(descriptors, last_key_index);
@@ -1454,7 +1422,7 @@ void AccessorAssembler::CheckFieldType(TNode<DescriptorArray> descriptors,
&r_heapobject);
GotoIf(Word32Equal(representation, Int32Constant(Representation::kNone)),
bailout);
- CSA_ASSERT(this, Word32Equal(representation,
+ CSA_DCHECK(this, Word32Equal(representation,
Int32Constant(Representation::kTagged)));
Goto(&all_fine);
@@ -1509,12 +1477,13 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
bool do_transitioning_store) {
Label done(this), if_field(this), if_descriptor(this);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
Int32Constant(kData)));
- Branch(Word32Equal(DecodeWord32<PropertyDetails::LocationField>(details),
- Int32Constant(kField)),
+ Branch(Word32Equal(
+ DecodeWord32<PropertyDetails::LocationField>(details),
+ Int32Constant(static_cast<int32_t>(PropertyLocation::kField))),
&if_field, &if_descriptor);
BIND(&if_field);
@@ -1675,7 +1644,7 @@ void AccessorAssembler::CheckPrototypeValidityCell(
GotoIf(
TaggedEqual(maybe_validity_cell, SmiConstant(Map::kPrototypeChainValid)),
&done);
- CSA_ASSERT(this, TaggedIsNotSmi(maybe_validity_cell));
+ CSA_DCHECK(this, TaggedIsNotSmi(maybe_validity_cell));
TNode<Object> cell_value =
LoadObjectField(CAST(maybe_validity_cell), Cell::kValueOffset);
@@ -1693,10 +1662,10 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
TNode<HeapObject> accessor_pair =
CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
- CSA_ASSERT(this, IsAccessorPair(accessor_pair));
+ CSA_DCHECK(this, IsAccessorPair(accessor_pair));
TNode<Object> setter =
LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(setter)));
Return(Call(p->context(), setter, p->receiver(), p->value()));
}
@@ -1759,41 +1728,33 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Label if_add_normal(this), if_store_global_proxy(this), if_api_setter(this),
if_accessor(this), if_native_data_property(this), if_slow(this);
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+ CSA_DCHECK(this, TaggedIsSmi(smi_handler));
TNode<Int32T> handler_word = SmiToInt32(CAST(smi_handler));
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kNormal)),
- &if_add_normal);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kNormal)), &if_add_normal);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kSlow)),
- &if_slow);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kSlow)), &if_slow);
TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_holder));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_holder));
TNode<HeapObject> holder = GetHeapObjectAssumeWeak(maybe_holder, miss);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kGlobalProxy)),
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kGlobalProxy)),
&if_store_global_proxy);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kAccessor)),
- &if_accessor);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kAccessor)), &if_accessor);
- GotoIf(Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kNativeDataProperty)),
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kNativeDataProperty)),
&if_native_data_property);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)),
- &if_api_setter);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kApiSetter)), &if_api_setter);
- GotoIf(
- Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kApiSetterHolderIsPrototype)),
- &if_api_setter);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kApiSetterHolderIsPrototype)),
+ &if_api_setter);
- CSA_ASSERT(this,
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kProxy)));
+ CSA_DCHECK(this, Word32Equal(handler_kind, STORE_KIND(kProxy)));
HandleStoreToProxy(p, CAST(holder), miss, support_elements);
BIND(&if_slow);
@@ -1840,7 +1801,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
BIND(&if_api_setter);
{
Comment("api_setter");
- CSA_ASSERT(this, TaggedIsNotSmi(handler));
+ CSA_DCHECK(this, TaggedIsNotSmi(handler));
TNode<CallHandlerInfo> call_handler_info = CAST(holder);
// Context is stored either in data2 or data3 field depending on whether
@@ -1851,7 +1812,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
[=] { return LoadHandlerDataField(handler, 3); },
[=] { return LoadHandlerDataField(handler, 2); });
- CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_context));
TNode<Object> context = Select<Object>(
IsCleared(maybe_context), [=] { return SmiConstant(0); },
[=] { return GetHeapObjectAssumeWeak(maybe_context); });
@@ -1864,13 +1825,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
TVARIABLE(Object, api_holder, p->receiver());
Label store(this);
- GotoIf(Word32Equal(handler_kind, Int32Constant(StoreHandler::kApiSetter)),
- &store);
+ GotoIf(Word32Equal(handler_kind, STORE_KIND(kApiSetter)), &store);
- CSA_ASSERT(this,
- Word32Equal(
- handler_kind,
- Int32Constant(StoreHandler::kApiSetterHolderIsPrototype)));
+ CSA_DCHECK(this, Word32Equal(handler_kind,
+ STORE_KIND(kApiSetterHolderIsPrototype)));
api_holder = LoadMapPrototype(LoadMap(CAST(p->receiver())));
Goto(&store);
@@ -1935,11 +1893,9 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(TNode<Word32T> handler_word,
#ifdef DEBUG
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- CSA_ASSERT(
- this,
- Word32Or(
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kField)),
- Word32Equal(handler_kind, Int32Constant(StoreHandler::kConstField))));
+ CSA_DCHECK(this,
+ Word32Or(Word32Equal(handler_kind, STORE_KIND(kField)),
+ Word32Equal(handler_kind, STORE_KIND(kConstField))));
#endif
TNode<Uint32T> field_representation =
@@ -1984,7 +1940,7 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(TNode<Word32T> handler_word,
BIND(&if_double_field);
{
- CSA_ASSERT(this, Word32Equal(field_representation,
+ CSA_DCHECK(this, Word32Equal(field_representation,
Int32Constant(Representation::kDouble)));
Comment("double field checks");
TNode<Float64T> double_value = TryTaggedToFloat64(value, miss);
@@ -2005,7 +1961,7 @@ void AccessorAssembler::CheckHeapObjectTypeMatchesDescriptor(
// Skip field type check in favor of constant value check when storing
// to constant field.
GotoIf(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word),
- Int32Constant(StoreHandler::kConstField)),
+ STORE_KIND(kConstField)),
&done);
TNode<IntPtrT> descriptor =
Signed(DecodeWordFromWord32<StoreHandler::DescriptorBits>(handler_word));
@@ -2048,8 +2004,6 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
TNode<Word32T> handler_word, TNode<JSObject> holder, TNode<Object> value,
base::Optional<TNode<Float64T>> double_value, Representation representation,
Label* miss) {
- Label done(this);
-
bool store_value_as_double = representation.IsDouble();
TNode<BoolT> is_inobject =
@@ -2072,7 +2026,7 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
// Store the double value directly into the mutable HeapNumber.
TNode<Object> field = LoadObjectField(property_storage, offset);
- CSA_ASSERT(this, IsHeapNumber(CAST(field)));
+ CSA_DCHECK(this, IsHeapNumber(CAST(field)));
actual_property_storage = CAST(field);
actual_offset = IntPtrConstant(HeapNumber::kValueOffset);
Goto(&property_and_offset_ready);
@@ -2085,7 +2039,7 @@ void AccessorAssembler::HandleStoreFieldAndReturn(
// Do constant value check if necessary.
Label do_store(this);
GotoIfNot(Word32Equal(DecodeWord32<StoreHandler::KindBits>(handler_word),
- Int32Constant(StoreHandler::kConstField)),
+ STORE_KIND(kConstField)),
&do_store);
{
if (store_value_as_double) {
@@ -2172,7 +2126,7 @@ TNode<PropertyArray> AccessorAssembler::ExtendPropertiesBackingStore(
FixedArrayBase::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
// The size of a new properties backing store is guaranteed to be small
// enough that the new backing store will be allocated in new space.
- CSA_ASSERT(this, IntPtrLessThan(new_capacity,
+ CSA_DCHECK(this, IntPtrLessThan(new_capacity,
IntPtrConstant(kMaxNumberOfDescriptors +
JSObject::kFieldsAdded)));
@@ -2624,56 +2578,58 @@ void AccessorAssembler::GenericPropertyLoad(
GotoIf(IsSetWord32<Map::Bits3::IsDictionaryMapBit>(bitfield3),
&if_property_dictionary);
- // Try looking up the property on the lookup_start_object; if unsuccessful,
- // look for a handler in the stub cache.
- TNode<DescriptorArray> descriptors =
- LoadMapDescriptors(lookup_start_object_map);
+ {
+ // Try looking up the property on the lookup_start_object; if unsuccessful,
+ // look for a handler in the stub cache.
+ TNode<DescriptorArray> descriptors =
+ LoadMapDescriptors(lookup_start_object_map);
- Label if_descriptor_found(this), try_stub_cache(this);
- TVARIABLE(IntPtrT, var_name_index);
- Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
- : &lookup_prototype_chain;
- DescriptorLookup(name, descriptors, bitfield3, &if_descriptor_found,
- &var_name_index, notfound);
+ Label if_descriptor_found(this), try_stub_cache(this);
+ TVARIABLE(IntPtrT, var_name_index);
+ Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
+ : &lookup_prototype_chain;
+ DescriptorLookup(name, descriptors, bitfield3, &if_descriptor_found,
+ &var_name_index, notfound);
- BIND(&if_descriptor_found);
- {
- LoadPropertyFromFastObject(lookup_start_object, lookup_start_object_map,
- descriptors, var_name_index.value(),
- &var_details, &var_value);
- Goto(&if_found_on_lookup_start_object);
- }
-
- if (use_stub_cache == kUseStubCache) {
- DCHECK_EQ(lookup_start_object, p->receiver_and_lookup_start_object());
- Label stub_cache(this);
- BIND(&try_stub_cache);
- // When there is no feedback vector don't use stub cache.
- GotoIfNot(IsUndefined(p->vector()), &stub_cache);
- // Fall back to the slow path for private symbols.
- Branch(IsPrivateSymbol(name), slow, &lookup_prototype_chain);
-
- BIND(&stub_cache);
- Comment("stub cache probe for fast property load");
- TVARIABLE(MaybeObject, var_handler);
- Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), lookup_start_object, name,
- &found_handler, &var_handler, &stub_cache_miss);
- BIND(&found_handler);
+ BIND(&if_descriptor_found);
{
- LazyLoadICParameters lazy_p(p);
- HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()),
- &stub_cache_miss, &direct_exit);
+ LoadPropertyFromFastObject(lookup_start_object, lookup_start_object_map,
+ descriptors, var_name_index.value(),
+ &var_details, &var_value);
+ Goto(&if_found_on_lookup_start_object);
}
- BIND(&stub_cache_miss);
- {
- // TODO(jkummerow): Check if the property exists on the prototype
- // chain. If it doesn't, then there's no point in missing.
- Comment("KeyedLoadGeneric_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context(),
- p->receiver_and_lookup_start_object(), name, p->slot(),
- p->vector());
+ if (use_stub_cache == kUseStubCache) {
+ DCHECK_EQ(lookup_start_object, p->receiver_and_lookup_start_object());
+ Label stub_cache(this);
+ BIND(&try_stub_cache);
+ // When there is no feedback vector don't use stub cache.
+ GotoIfNot(IsUndefined(p->vector()), &stub_cache);
+ // Fall back to the slow path for private symbols.
+ Branch(IsPrivateSymbol(name), slow, &lookup_prototype_chain);
+
+ BIND(&stub_cache);
+ Comment("stub cache probe for fast property load");
+ TVARIABLE(MaybeObject, var_handler);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->load_stub_cache(), lookup_start_object, name,
+ &found_handler, &var_handler, &stub_cache_miss);
+ BIND(&found_handler);
+ {
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()),
+ &stub_cache_miss, &direct_exit);
+ }
+
+ BIND(&stub_cache_miss);
+ {
+ // TODO(jkummerow): Check if the property exists on the prototype
+ // chain. If it doesn't, then there's no point in missing.
+ Comment("KeyedLoadGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context(),
+ p->receiver_and_lookup_start_object(), name, p->slot(),
+ p->vector());
+ }
}
}
@@ -2750,7 +2706,7 @@ void AccessorAssembler::GenericPropertyLoad(
BIND(&is_private_symbol);
{
- CSA_ASSERT(this, IsPrivateSymbol(name));
+ CSA_DCHECK(this, IsPrivateSymbol(name));
// For private names that don't exist on the receiver, we bail
// to the runtime to throw. For private symbols, we just return
@@ -2791,7 +2747,7 @@ TNode<IntPtrT> AccessorAssembler::StubCachePrimaryOffset(TNode<Name> name,
TNode<Map> map) {
// Compute the hash of the name (use entire hash field).
TNode<Uint32T> raw_hash_field = LoadNameRawHashField(name);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(Word32And(raw_hash_field,
Int32Constant(Name::kHashNotComputedMask)),
Int32Constant(0)));
@@ -3100,9 +3056,9 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
ExitPoint* exit_point) {
// Neither deprecated map nor monomorphic. These cases are handled in the
// bytecode handler.
- CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
- CSA_ASSERT(this, TaggedNotEqual(lookup_start_object_map, feedback));
- CSA_ASSERT(this, Word32BinaryNot(IsWeakFixedArrayMap(LoadMap(feedback))));
+ CSA_DCHECK(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
+ CSA_DCHECK(this, TaggedNotEqual(lookup_start_object_map, feedback));
+ CSA_DCHECK(this, Word32BinaryNot(IsWeakFixedArrayMap(LoadMap(feedback))));
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
{
@@ -3240,7 +3196,7 @@ void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
BIND(&if_property_cell);
{
// Load value or try handler case if the weak reference is cleared.
- CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, try_handler));
TNode<Object> value =
@@ -3420,16 +3376,18 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
// slot.
Comment("KeyedLoadIC_try_polymorphic_name");
TVARIABLE(Name, var_name);
- TVARIABLE(IntPtrT, var_index);
Label if_polymorphic_name(this), feedback_matches(this),
if_internalized(this), if_notinternalized(this, Label::kDeferred);
// Fast-case: The recorded {feedback} matches the {name}.
GotoIf(TaggedEqual(strong_feedback, p->name()), &feedback_matches);
- // Try to internalize the {name} if it isn't already.
- TryToName(p->name(), &miss, &var_index, &if_internalized, &var_name, &miss,
- &if_notinternalized);
+ {
+ // Try to internalize the {name} if it isn't already.
+ TVARIABLE(IntPtrT, var_index);
+ TryToName(p->name(), &miss, &var_index, &if_internalized, &var_name,
+ &miss, &if_notinternalized);
+ }
BIND(&if_internalized);
{
@@ -3572,8 +3530,8 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
// When we get here, we know that the {name} matches the recorded
// feedback name in the {vector} and can safely be used for the
// LoadIC handler logic below.
- CSA_ASSERT(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
- CSA_ASSERT(this, TaggedEqual(name, LoadFeedbackVectorSlot(vector, slot)),
+ CSA_DCHECK(this, Word32BinaryNot(IsDeprecatedMap(lookup_start_object_map)));
+ CSA_DCHECK(this, TaggedEqual(name, LoadFeedbackVectorSlot(vector, slot)),
name, vector);
// Check if we have a matching handler for the {lookup_start_object_map}.
@@ -3674,7 +3632,7 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
{
Label try_handler(this), miss(this, Label::kDeferred);
- CSA_ASSERT(this, IsWeakOrCleared(maybe_weak_ref));
+ CSA_DCHECK(this, IsWeakOrCleared(maybe_weak_ref));
TNode<PropertyCell> property_cell =
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, &try_handler));
@@ -3741,7 +3699,7 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
TNode<Int32T> details = LoadAndUntagToWord32ObjectField(
property_cell, PropertyCell::kPropertyDetailsRawOffset);
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), miss);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
Int32Constant(kData)));
@@ -3753,12 +3711,12 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
GotoIf(Word32Equal(type, Int32Constant(
static_cast<int>(PropertyCellType::kConstant))),
&constant);
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(cell_contents)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(cell_contents)));
GotoIf(Word32Equal(
type, Int32Constant(static_cast<int>(PropertyCellType::kMutable))),
&store);
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Word32Or(Word32Equal(type, Int32Constant(static_cast<int>(
PropertyCellType::kConstantType))),
Word32Equal(type, Int32Constant(static_cast<int>(
@@ -3787,7 +3745,7 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
{
// Since |value| is never the hole, the equality check below also handles an
// invalidated property cell correctly.
- CSA_ASSERT(this, Word32BinaryNot(IsTheHole(value)));
+ CSA_DCHECK(this, Word32BinaryNot(IsTheHole(value)));
GotoIfNot(TaggedEqual(cell_contents, value), miss);
exit_point->Return(value);
}
@@ -3929,8 +3887,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<Int32T> handler_word = SmiToInt32(CAST(var_handler.value()));
TNode<Uint32T> handler_kind =
DecodeWord32<StoreHandler::KindBits>(handler_word);
- CSA_ASSERT(this, Word32Equal(handler_kind,
- Int32Constant(StoreHandler::kSlow)));
+ CSA_DCHECK(this, Word32Equal(handler_kind, STORE_KIND(kSlow)));
#endif
Comment("StoreInArrayLiteralIC_Slow");
@@ -3952,7 +3909,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
BIND(&try_megamorphic);
{
Comment("StoreInArrayLiteralIC_try_megamorphic");
- CSA_ASSERT(
+ CSA_DCHECK(
this,
Word32Or(TaggedEqual(strong_feedback, UninitializedSymbolConstant()),
TaggedEqual(strong_feedback, MegamorphicSymbolConstant())));
@@ -4007,7 +3964,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
- CSA_ASSERT(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
+ CSA_DCHECK(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
MegamorphicSymbolConstant()));
TryProbeStubCache(isolate()->load_stub_cache(), receiver, CAST(name),
@@ -4582,15 +4539,15 @@ void AccessorAssembler::GenerateCloneObjectIC() {
Label allocate_object(this);
GotoIf(IsNullOrUndefined(source), &allocate_object);
- CSA_SLOW_ASSERT(this, IsJSObjectMap(source_map));
- CSA_SLOW_ASSERT(this, IsJSObjectMap(result_map));
+ CSA_SLOW_DCHECK(this, IsJSObjectMap(source_map));
+ CSA_SLOW_DCHECK(this, IsJSObjectMap(result_map));
// The IC fast case should only be taken if the result map a compatible
// elements kind with the source object.
TNode<FixedArrayBase> source_elements = LoadElements(CAST(source));
- auto flags = ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW;
- var_elements = CAST(CloneFixedArray(source_elements, flags));
+ auto flag = ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW;
+ var_elements = CAST(CloneFixedArray(source_elements, flag));
// Copy the PropertyArray backing store. The source PropertyArray must be
// either an Smi, or a PropertyArray.
@@ -4668,7 +4625,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
BIND(&try_megamorphic);
{
Comment("CloneObjectIC_try_megamorphic");
- CSA_ASSERT(
+ CSA_DCHECK(
this,
Word32Or(TaggedEqual(strong_feedback, UninitializedSymbolConstant()),
TaggedEqual(strong_feedback, MegamorphicSymbolConstant())));
@@ -4690,7 +4647,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
slot, maybe_vector));
var_handler = UncheckedCast<MaybeObject>(map_or_result);
GotoIf(IsMap(map_or_result), &if_handler);
- CSA_ASSERT(this, IsJSObject(map_or_result));
+ CSA_DCHECK(this, IsJSObject(map_or_result));
Return(map_or_result);
}
}
@@ -4793,5 +4750,8 @@ void AccessorAssembler::BranchIfPrototypesHaveNoElements(
}
}
+#undef LOAD_KIND
+#undef STORE_KIND
+
} // namespace internal
} // namespace v8
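
With LoadHandler::Kind and StoreHandler::Kind becoming scoped enums (see the handler-configuration.h hunk below), every comparison in the assembler needs an explicit integer cast; the LOAD_KIND/STORE_KIND macros introduced at the top of this file keep those call sites short. A standalone sketch of the idea, using a made-up LoadHandlerLike type rather than the real class:

#include <cassert>
#include <cstdint>

struct LoadHandlerLike {
  enum class Kind : uint8_t { kElement, kNormal, kSlow };
};

// Mirrors the new macro: expand to an integer constant for a given kind.
#define LOAD_KIND(kind) static_cast<intptr_t>(LoadHandlerLike::Kind::kind)

int main() {
  intptr_t handler_kind = LOAD_KIND(kNormal);  // as if decoded from a handler word
  assert(handler_kind == LOAD_KIND(kNormal));
  assert(handler_kind != LOAD_KIND(kSlow));
  (void)handler_kind;
  return 0;
}

#undef LOAD_KIND
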
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 285c266b80..081229c443 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -39,27 +39,27 @@ LoadHandler::Kind LoadHandler::GetHandlerKind(Smi smi_handler) {
}
Handle<Smi> LoadHandler::LoadNormal(Isolate* isolate) {
- int config = KindBits::encode(kNormal);
+ int config = KindBits::encode(Kind::kNormal);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadGlobal(Isolate* isolate) {
- int config = KindBits::encode(kGlobal);
+ int config = KindBits::encode(Kind::kGlobal);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadInterceptor(Isolate* isolate) {
- int config = KindBits::encode(kInterceptor);
+ int config = KindBits::encode(Kind::kInterceptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadSlow(Isolate* isolate) {
- int config = KindBits::encode(kSlow);
+ int config = KindBits::encode(Kind::kSlow);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) {
- int config = KindBits::encode(kField) |
+ int config = KindBits::encode(Kind::kField) |
IsInobjectBits::encode(field_index.is_inobject()) |
IsDoubleBits::encode(field_index.is_double()) |
FieldIndexBits::encode(field_index.index());
@@ -68,49 +68,51 @@ Handle<Smi> LoadHandler::LoadField(Isolate* isolate, FieldIndex field_index) {
Handle<Smi> LoadHandler::LoadWasmStructField(Isolate* isolate,
WasmValueType type, int offset) {
- int config = KindBits::encode(kField) | IsWasmStructBits::encode(true) |
+ int config = KindBits::encode(Kind::kField) | IsWasmStructBits::encode(true) |
WasmFieldTypeBits::encode(type) |
WasmFieldOffsetBits::encode(offset);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadConstantFromPrototype(Isolate* isolate) {
- int config = KindBits::encode(kConstantFromPrototype);
+ int config = KindBits::encode(Kind::kConstantFromPrototype);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadAccessor(Isolate* isolate, int descriptor) {
- int config = KindBits::encode(kAccessor) | DescriptorBits::encode(descriptor);
+ int config =
+ KindBits::encode(Kind::kAccessor) | DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadProxy(Isolate* isolate) {
- int config = KindBits::encode(kProxy);
+ int config = KindBits::encode(Kind::kProxy);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadNativeDataProperty(Isolate* isolate,
int descriptor) {
- int config = KindBits::encode(kNativeDataProperty) |
+ int config = KindBits::encode(Kind::kNativeDataProperty) |
DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadApiGetter(Isolate* isolate,
bool holder_is_receiver) {
- int config = KindBits::encode(
- holder_is_receiver ? kApiGetter : kApiGetterHolderIsPrototype);
+ int config =
+ KindBits::encode(holder_is_receiver ? Kind::kApiGetter
+ : Kind::kApiGetterHolderIsPrototype);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadModuleExport(Isolate* isolate, int index) {
int config =
- KindBits::encode(kModuleExport) | ExportsIndexBits::encode(index);
+ KindBits::encode(Kind::kModuleExport) | ExportsIndexBits::encode(index);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadNonExistent(Isolate* isolate) {
- int config = KindBits::encode(kNonExistent);
+ int config = KindBits::encode(Kind::kNonExistent);
return handle(Smi::FromInt(config), isolate);
}
@@ -120,7 +122,7 @@ Handle<Smi> LoadHandler::LoadElement(Isolate* isolate,
bool is_js_array,
KeyedAccessLoadMode load_mode) {
int config =
- KindBits::encode(kElement) |
+ KindBits::encode(Kind::kElement) |
AllowOutOfBoundsBits::encode(load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) |
ElementsKindBits::encode(elements_kind) |
ConvertHoleBits::encode(convert_hole_to_undefined) |
@@ -131,15 +133,15 @@ Handle<Smi> LoadHandler::LoadElement(Isolate* isolate,
Handle<Smi> LoadHandler::LoadIndexedString(Isolate* isolate,
KeyedAccessLoadMode load_mode) {
int config =
- KindBits::encode(kIndexedString) |
+ KindBits::encode(Kind::kIndexedString) |
AllowOutOfBoundsBits::encode(load_mode == LOAD_IGNORE_OUT_OF_BOUNDS);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> LoadHandler::LoadWasmArrayElement(Isolate* isolate,
WasmValueType type) {
- int config = KindBits::encode(kElement) | IsWasmArrayBits::encode(true) |
- WasmArrayTypeBits::encode(type);
+ int config = KindBits::encode(Kind::kElement) |
+ IsWasmArrayBits::encode(true) | WasmArrayTypeBits::encode(type);
return handle(Smi::FromInt(config), isolate);
}
@@ -148,17 +150,17 @@ OBJECT_CONSTRUCTORS_IMPL(StoreHandler, DataHandler)
CAST_ACCESSOR(StoreHandler)
Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
- int config = KindBits::encode(kGlobalProxy);
+ int config = KindBits::encode(Kind::kGlobalProxy);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreNormal(Isolate* isolate) {
- int config = KindBits::encode(kNormal);
+ int config = KindBits::encode(Kind::kNormal);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreInterceptor(Isolate* isolate) {
- int config = KindBits::encode(kInterceptor);
+ int config = KindBits::encode(Kind::kInterceptor);
return handle(Smi::FromInt(config), isolate);
}
@@ -210,8 +212,8 @@ Builtin StoreHandler::ElementsTransitionAndStoreBuiltin(
Handle<Smi> StoreHandler::StoreSlow(Isolate* isolate,
KeyedAccessStoreMode store_mode) {
- int config =
- KindBits::encode(kSlow) | KeyedAccessStoreModeBits::encode(store_mode);
+ int config = KindBits::encode(Kind::kSlow) |
+ KeyedAccessStoreModeBits::encode(store_mode);
return handle(Smi::FromInt(config), isolate);
}
@@ -220,7 +222,7 @@ Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
}
Smi StoreHandler::StoreProxy() {
- int config = KindBits::encode(kProxy);
+ int config = KindBits::encode(Kind::kProxy);
return Smi::FromInt(config);
}
@@ -228,7 +230,7 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
Representation representation) {
DCHECK(!representation.IsNone());
- DCHECK(kind == kField || kind == kConstField);
+ DCHECK(kind == Kind::kField || kind == Kind::kConstField);
int config = KindBits::encode(kind) |
IsInobjectBits::encode(field_index.is_inobject()) |
@@ -242,26 +244,29 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
PropertyConstness constness,
Representation representation) {
- Kind kind = constness == PropertyConstness::kMutable ? kField : kConstField;
+ Kind kind = constness == PropertyConstness::kMutable ? Kind::kField
+ : Kind::kConstField;
return StoreField(isolate, kind, descriptor, field_index, representation);
}
Handle<Smi> StoreHandler::StoreNativeDataProperty(Isolate* isolate,
int descriptor) {
- int config = KindBits::encode(kNativeDataProperty) |
+ int config = KindBits::encode(Kind::kNativeDataProperty) |
DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreAccessor(Isolate* isolate, int descriptor) {
- int config = KindBits::encode(kAccessor) | DescriptorBits::encode(descriptor);
+ int config =
+ KindBits::encode(Kind::kAccessor) | DescriptorBits::encode(descriptor);
return handle(Smi::FromInt(config), isolate);
}
Handle<Smi> StoreHandler::StoreApiSetter(Isolate* isolate,
bool holder_is_receiver) {
- int config = KindBits::encode(
- holder_is_receiver ? kApiSetter : kApiSetterHolderIsPrototype);
+ int config =
+ KindBits::encode(holder_is_receiver ? Kind::kApiSetter
+ : Kind::kApiSetterHolderIsPrototype);
return handle(Smi::FromInt(config), isolate);
}
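
All of the handler constructors above now spell out Kind::kXyz because a scoped enum no longer converts implicitly when fed to the bit-field encoder. A self-contained approximation of that encode/decode pattern; KindBits here is a simplified stand-in for V8's BitField helpers, not the real implementation:

#include <cassert>
#include <cstdint>

enum class Kind : uint32_t { kNormal, kGlobal, kSlow };

// Simplified stand-in for V8's BitField-based KindBits.
struct KindBits {
  static constexpr uint32_t kShift = 0;
  static constexpr uint32_t kMask = 0x7;
  static uint32_t encode(Kind k) {
    // Explicit cast is required once Kind is an enum class.
    return (static_cast<uint32_t>(k) & kMask) << kShift;
  }
  static Kind decode(uint32_t config) {
    return static_cast<Kind>((config >> kShift) & kMask);
  }
};

int main() {
  uint32_t config = KindBits::encode(Kind::kGlobal);
  assert(KindBits::decode(config) == Kind::kGlobal);
  (void)config;
  return 0;
}
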
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 194478fc80..83793b64ae 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -172,7 +172,7 @@ KeyedAccessLoadMode LoadHandler::GetKeyedAccessLoadMode(MaybeObject handler) {
if (handler->IsSmi()) {
int const raw_handler = handler.ToSmi().value();
Kind const kind = KindBits::decode(raw_handler);
- if ((kind == kElement || kind == kIndexedString) &&
+ if ((kind == Kind::kElement || kind == Kind::kIndexedString) &&
AllowOutOfBoundsBits::decode(raw_handler)) {
return LOAD_IGNORE_OUT_OF_BOUNDS;
}
@@ -191,7 +191,7 @@ KeyedAccessStoreMode StoreHandler::GetKeyedAccessStoreMode(
// KeyedAccessStoreMode, compute it using KeyedAccessStoreModeForBuiltin
// method. Hence if any other Handler get to this path, just return
// STANDARD_STORE.
- if (kind != kSlow) {
+ if (kind != Kind::kSlow) {
return STANDARD_STORE;
}
KeyedAccessStoreMode store_mode =
@@ -251,8 +251,8 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
DCHECK(!transition_map->IsJSGlobalObjectMap());
Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(0);
// Store normal with enabled lookup on receiver.
- int config =
- KindBits::encode(kNormal) | LookupOnLookupStartObjectBits::encode(true);
+ int config = KindBits::encode(Kind::kNormal) |
+ LookupOnLookupStartObjectBits::encode(true);
handler->set_smi_handler(Smi::FromInt(config));
handler->set_validity_cell(*validity_cell);
return MaybeObjectHandle(handler);
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 2fc200f93e..728f1f575a 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -48,7 +48,7 @@ class LoadHandler final : public DataHandler {
DECL_PRINTER(LoadHandler)
DECL_VERIFIER(LoadHandler)
- enum Kind {
+ enum class Kind {
kElement,
kIndexedString,
kNormal,
@@ -245,7 +245,7 @@ class StoreHandler final : public DataHandler {
DECL_PRINTER(StoreHandler)
DECL_VERIFIER(StoreHandler)
- enum Kind {
+ enum class Kind {
kField,
kConstField,
kAccessor,
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 68eee92cef..b2e3b6fe25 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -142,13 +142,19 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ic_info.type += type;
int code_offset = 0;
+ AbstractCode code = function.abstract_code(isolate_);
if (function.ActiveTierIsIgnition()) {
code_offset = InterpretedFrame::GetBytecodeOffset(frame->fp());
+ } else if (function.ActiveTierIsBaseline()) {
+ // TODO(pthier): AbstractCode should fully support Baseline code.
+ BaselineFrame* baseline_frame = BaselineFrame::cast(frame);
+ code_offset = baseline_frame->GetBytecodeOffset();
+ code = AbstractCode::cast(baseline_frame->GetBytecodeArray());
} else {
code_offset = static_cast<int>(frame->pc() - function.code_entry_point());
}
- JavaScriptFrame::CollectFunctionAndOffsetForICStats(
- function, function.abstract_code(isolate_), code_offset);
+ JavaScriptFrame::CollectFunctionAndOffsetForICStats(function, code,
+ code_offset);
// Reserve enough space for IC transition state, the longest length is 17.
ic_info.state.reserve(17);
@@ -973,11 +979,11 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
// Use simple field loads for some well-known callback properties.
// The method will only return true for absolute truths based on the
// lookup start object maps.
- FieldIndex index;
+ FieldIndex field_index;
if (Accessors::IsJSObjectFieldAccessor(isolate(), map, lookup->name(),
- &index)) {
+ &field_index)) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
- return LoadHandler::LoadField(isolate(), index);
+ return LoadHandler::LoadField(isolate(), field_index);
}
if (holder->IsJSModuleNamespace()) {
Handle<ObjectHashTable> exports(
@@ -988,9 +994,9 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Smi::ToInt(lookup->name()->GetHash()));
// We found the accessor, so the entry must exist.
DCHECK(entry.is_found());
- int index = ObjectHashTable::EntryToValueIndex(entry);
+ int value_index = ObjectHashTable::EntryToValueIndex(entry);
Handle<Smi> smi_handler =
- LoadHandler::LoadModuleExport(isolate(), index);
+ LoadHandler::LoadModuleExport(isolate(), value_index);
if (holder_is_lookup_start_object) {
return smi_handler;
}
@@ -1134,7 +1140,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return LoadHandler::LoadSlow(isolate());
} else {
- DCHECK_EQ(kField, lookup->property_details().location());
+ DCHECK_EQ(PropertyLocation::kField,
+ lookup->property_details().location());
#if V8_ENABLE_WEBASSEMBLY
if (V8_UNLIKELY(holder->IsWasmObject(isolate()))) {
smi_handler =
@@ -1993,7 +2000,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
}
// -------------- Fields --------------
- if (lookup->property_details().location() == kField) {
+ if (lookup->property_details().location() == PropertyLocation::kField) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
int descriptor = lookup->GetFieldDescriptorIndex();
FieldIndex index = lookup->GetFieldIndex();
@@ -2010,7 +2017,8 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
}
// -------------- Constant properties --------------
- DCHECK_EQ(kDescriptor, lookup->property_details().location());
+ DCHECK_EQ(PropertyLocation::kDescriptor,
+ lookup->property_details().location());
set_slow_stub_reason("constant property");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 8218d3d521..be54b9c113 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -317,7 +317,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
TNode<IntPtrT> index, TNode<Object> value, TNode<Context> context,
Label* slow, UpdateLength update_length) {
if (update_length != kDontChangeLength) {
- CSA_ASSERT(this, IsJSArrayMap(receiver_map));
+ CSA_DCHECK(this, IsJSArrayMap(receiver_map));
// Check if the length property is writable. The fast check is only
// supported for fast properties.
GotoIf(IsDictionaryMap(receiver_map), slow);
@@ -429,7 +429,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
TryRewriteElements(receiver, receiver_map, elements, native_context,
PACKED_SMI_ELEMENTS, target_kind, slow);
// The elements backing store didn't change, no reload necessary.
- CSA_ASSERT(this, TaggedEqual(elements, LoadElements(receiver)));
+ CSA_DCHECK(this, TaggedEqual(elements, LoadElements(receiver)));
Store(elements, offset, value);
MaybeUpdateLengthAndReturn(receiver, index, value, update_length);
}
@@ -760,7 +760,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<JSReceiver> receiver, TNode<Map> receiver_map,
const StoreICParameters* p, ExitPoint* exit_point, Label* slow,
Maybe<LanguageMode> maybe_language_mode) {
- CSA_ASSERT(this, IsSimpleObjectMap(receiver_map));
+ CSA_DCHECK(this, IsSimpleObjectMap(receiver_map));
// TODO(rmcilroy) Type as Struct once we use a trimmed down
// LoadAccessorFromFastObject instead of LoadPropertyFromFastObject.
TVARIABLE(Object, var_accessor_pair);
@@ -891,14 +891,13 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
GotoIf(IsJSTypedArrayMap(receiver_map), slow);
CheckForAssociatedProtector(name, slow);
Label extensible(this), is_private_symbol(this);
- TNode<Uint32T> bitfield3 = LoadMapBitField3(receiver_map);
GotoIf(IsPrivateSymbol(name), &is_private_symbol);
Branch(IsSetWord32<Map::Bits3::IsExtensibleBit>(bitfield3), &extensible,
slow);
BIND(&is_private_symbol);
{
- CSA_ASSERT(this, IsPrivateSymbol(name));
+ CSA_DCHECK(this, IsPrivateSymbol(name));
// For private names, we miss to the runtime which will throw.
// For private symbols, we extend and store an own property.
Branch(IsPrivateName(CAST(name)), slow, &extensible);
@@ -931,7 +930,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Label not_callable(this);
TNode<Struct> accessor_pair = CAST(var_accessor_pair.value());
GotoIf(IsAccessorInfo(accessor_pair), slow);
- CSA_ASSERT(this, IsAccessorPair(accessor_pair));
+ CSA_DCHECK(this, IsAccessorPair(accessor_pair));
TNode<HeapObject> setter =
CAST(LoadObjectField(accessor_pair, AccessorPair::kSetterOffset));
TNode<Map> setter_map = LoadMap(setter);
@@ -1112,7 +1111,7 @@ void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
Label done(this), slow(this, Label::kDeferred);
ExitPoint exit_point(this, [&](TNode<Object> result) { Goto(&done); });
- CSA_ASSERT(this, Word32Equal(is_simple_receiver,
+ CSA_DCHECK(this, Word32Equal(is_simple_receiver,
IsSimpleObjectMap(LoadMap(receiver))));
GotoIfNot(is_simple_receiver, &slow);
diff --git a/deps/v8/src/ic/unary-op-assembler.cc b/deps/v8/src/ic/unary-op-assembler.cc
index 97ce0cf48d..fb5ab7f422 100644
--- a/deps/v8/src/ic/unary-op-assembler.cc
+++ b/deps/v8/src/ic/unary-op-assembler.cc
@@ -145,7 +145,7 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
Label if_smi(this), if_heapnumber(this), if_oddball(this);
Label if_bigint(this, Label::kDeferred);
Label if_other(this, Label::kDeferred);
- TNode<Object> value = var_value.value();
+ value = var_value.value();
GotoIf(TaggedIsSmi(value), &if_smi);
TNode<HeapObject> value_heap_object = CAST(value);
@@ -181,7 +181,7 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ CSA_DCHECK(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback,
BinaryOperationFeedback::kNumberOrOddball);
@@ -195,7 +195,7 @@ class UnaryOpAssemblerImpl final : public CodeStubAssembler {
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ CSA_DCHECK(this, SmiEqual(var_feedback.value(),
SmiConstant(BinaryOperationFeedback::kNone)));
OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
var_value = CallBuiltin(Builtin::kNonNumberToNumeric, context,
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index e81b74d440..ea654ba103 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -281,7 +281,7 @@ class Genesis {
Handle<Context> native_context);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
- bool ConfigureGlobalObjects(
+ bool ConfigureGlobalObject(
v8::Local<v8::ObjectTemplate> global_proxy_template);
// Migrates all properties from the 'from' object to the 'to'
@@ -357,27 +357,6 @@ void Bootstrapper::LogAllMaps() {
LOG(isolate_, LogAllMaps());
}
-void Bootstrapper::DetachGlobal(Handle<Context> env) {
- isolate_->counters()->errors_thrown_per_context()->AddSample(
- env->native_context().GetErrorsThrown());
-
- ReadOnlyRoots roots(isolate_);
- Handle<JSGlobalProxy> global_proxy(env->global_proxy(), isolate_);
- global_proxy->set_native_context(roots.null_value());
- // NOTE: Turbofan's JSNativeContextSpecialization depends on DetachGlobal
- // causing a map change.
- JSObject::ForceSetPrototype(isolate_, global_proxy,
- isolate_->factory()->null_value());
- global_proxy->map().set_constructor_or_back_pointer(roots.null_value(),
- kRelaxedStore);
- if (FLAG_track_detached_contexts) {
- isolate_->AddDetachedContext(env);
- }
- DCHECK(global_proxy->IsDetached());
-
- env->native_context().set_microtask_queue(isolate_, nullptr);
-}
-
namespace {
#ifdef DEBUG
@@ -1339,9 +1318,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
global_proxy_function->initial_map().set_may_have_interesting_symbols(true);
native_context()->set_global_proxy_function(*global_proxy_function);
- // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
- // Return the global proxy.
-
+ // Set the global object as the (hidden) __proto__ of the global proxy after
+ // ConfigureGlobalObject
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
// Set the native context for the global object.
@@ -4390,7 +4368,6 @@ void Genesis::InitializeCallSiteBuiltins() {
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_brand_checks)
@@ -4602,7 +4579,7 @@ void Genesis::InitializeGlobal_harmony_intl_enumeration() {
.ToHandleChecked());
SimpleInstallFunction(isolate(), intl, "supportedValuesOf",
- Builtin::kIntlSupportedValuesOf, 0, false);
+ Builtin::kIntlSupportedValuesOf, 1, false);
}
#endif // V8_INTL_SUPPORT
@@ -5193,7 +5170,7 @@ bool Genesis::InstallExtension(Isolate* isolate,
return result;
}
-bool Genesis::ConfigureGlobalObjects(
+bool Genesis::ConfigureGlobalObject(
v8::Local<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(native_context()->global_proxy(), isolate());
Handle<JSObject> global_object(native_context()->global_object(), isolate());
@@ -5261,7 +5238,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
from->map().instance_descriptors(isolate()), isolate());
for (InternalIndex i : from->map().IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
@@ -5278,7 +5255,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(kAccessor, details.kind());
Handle<Name> key(descs->GetKey(i), isolate());
// If the property is already there we skip it.
@@ -5481,16 +5458,20 @@ Genesis::Genesis(
isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
- if (context_snapshot_index == 0) {
+ // If no global proxy template was passed in, simply use the global in the
+ // snapshot. If a global proxy template was passed in, it's used to recreate
+ // the global object and its prototype chain, and the data properties from the
+ // deserialized global are copied onto it.
+ if (context_snapshot_index == 0 && !global_proxy_template.IsEmpty()) {
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalObject(global_object);
-
- if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ if (!ConfigureGlobalObject(global_proxy_template)) return;
} else {
// The global proxy needs to be integrated into the native context.
HookUpGlobalProxy(global_proxy);
}
+ DCHECK_EQ(global_proxy->native_context(), *native_context());
DCHECK(!global_proxy->IsDetachedFrom(native_context()->global_object()));
} else {
DCHECK(native_context().is_null());
@@ -5517,7 +5498,7 @@ Genesis::Genesis(
if (!InstallABunchOfRandomThings()) return;
if (!InstallExtrasBindings()) return;
- if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ if (!ConfigureGlobalObject(global_proxy_template)) return;
isolate->counters()->contexts_created_from_scratch()->Increment();
diff --git a/deps/v8/src/init/bootstrapper.h b/deps/v8/src/init/bootstrapper.h
index b92e755c93..0309c38c57 100644
--- a/deps/v8/src/init/bootstrapper.h
+++ b/deps/v8/src/init/bootstrapper.h
@@ -80,9 +80,6 @@ class Bootstrapper final {
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template);
- // Detach the environment from its outer global object.
- void DetachGlobal(Handle<Context> env);
-
// Traverses the pointers for memory management.
void Iterate(RootVisitor* v);
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 34a24a348f..c790f5c09a 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -86,13 +86,23 @@ void IsolateAllocator::InitializeOncePerProcess() {
// disallowed in the future, at the latest once ArrayBuffers are referenced
// through an offset rather than a raw pointer.
if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
- CHECK(kAllowBackingStoresOutsideDataCage);
+ CHECK(kAllowBackingStoresOutsideCage);
} else {
auto cage = GetProcessWideVirtualMemoryCage();
CHECK(cage->is_initialized());
- DCHECK_EQ(params.reservation_size, cage->pointer_cage_size());
- existing_reservation = base::AddressRegion(cage->pointer_cage_base(),
- cage->pointer_cage_size());
+ // The pointer compression cage must be placed at the start of the virtual
+ // memory cage.
+ // TODO(chromium:12180) this currently assumes that no other pages were
+ // allocated through the cage's page allocator in the meantime. In the
+ // future, the cage initialization will happen just before this function
+ // runs, and so this will be guaranteed. Currently however, it is possible
+ // that the embedder accidentally uses the cage's page allocator prior to
+ // initializing V8, in which case this CHECK will likely fail.
+ CHECK(cage->page_allocator()->AllocatePagesAt(
+ cage->base(), params.reservation_size, PageAllocator::kNoAccess));
+ existing_reservation =
+ base::AddressRegion(cage->base(), params.reservation_size);
+ params.page_allocator = cage->page_allocator();
}
#endif
if (!GetProcessWidePtrComprCage()->InitReservation(params,
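For context, a sketch of the fixed-address reservation this change relies on (PageAllocatorLike is a hypothetical interface, not v8::PageAllocator or BoundedPageAllocator):

#include <cstddef>
#include <cstdint>

struct PageAllocatorLike {
  // Fixed-address request: either maps exactly [address, address + size) or
  // fails, e.g. because something is already mapped there.
  virtual bool AllocatePagesAt(uintptr_t address, size_t size) = 0;
  virtual ~PageAllocatorLike() = default;
};

// The pointer compression cage must start at the cage base, so the reservation
// is requested at that exact address; a failure here corresponds to the
// situation the TODO(chromium:12180) comment describes, where pages were
// already handed out from the cage's allocator before V8 initialization.
bool ReservePtrComprRegionAtCageBase(PageAllocatorLike* cage_allocator,
                                     uintptr_t cage_base,
                                     size_t reservation_size) {
  return cage_allocator->AllocatePagesAt(cage_base, reservation_size);
}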
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 7258ba8d93..a7b558bbad 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -80,7 +80,7 @@ void V8::InitializeOncePerProcessImpl() {
if (!GetProcessWideVirtualMemoryCage()->is_initialized()) {
// For now, we still allow the cage to be disabled even if V8 was compiled
// with V8_VIRTUAL_MEMORY_CAGE. This will eventually be forbidden.
- CHECK(kAllowBackingStoresOutsideDataCage);
+ CHECK(kAllowBackingStoresOutsideCage);
GetProcessWideVirtualMemoryCage()->Disable();
}
#endif
@@ -181,6 +181,8 @@ void V8::InitializeOncePerProcessImpl() {
if (FLAG_random_seed) SetRandomMmapSeed(FLAG_random_seed);
+ if (FLAG_print_flag_values) FlagList::PrintValues();
+
#if defined(V8_USE_PERFETTO)
if (perfetto::Tracing::IsInitialized()) TrackEvent::Register();
#endif
diff --git a/deps/v8/src/init/vm-cage.cc b/deps/v8/src/init/vm-cage.cc
index 9d88e4085b..f62b7d4cd6 100644
--- a/deps/v8/src/init/vm-cage.cc
+++ b/deps/v8/src/init/vm-cage.cc
@@ -5,6 +5,7 @@
#include "src/init/vm-cage.h"
#include "include/v8-internal.h"
+#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/utils/allocation.h"
@@ -23,16 +24,31 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
size_t size, bool use_guard_regions) {
CHECK(!initialized_);
CHECK(!disabled_);
+ CHECK(base::bits::IsPowerOfTwo(size));
CHECK_GE(size, kVirtualMemoryCageMinimumSize);
- size_t reservation_size = size;
- if (use_guard_regions) {
- reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ // Currently, we allow the cage to be smaller than the requested size. This
+ // way, we can gracefully handle cage reservation failures during the initial
+ // rollout and can collect data on how often these occur. In the future, we
+ // will likely either require the cage to always have a fixed size or will
+ // design CagedPointers (pointers that are guaranteed to point into the cage,
+ // e.g. because they are stored as offsets from the cage base) in a way that
+ // doesn't reduce the cage's security properties if it has a smaller size.
+ // Which of these options is ultimately taken likely depends on how frequently
+ // cage reservation failures occur in practice.
+ while (!base_ && size >= kVirtualMemoryCageMinimumSize) {
+ size_t reservation_size = size;
+ if (use_guard_regions) {
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+ base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
+ nullptr, reservation_size, kVirtualMemoryCageAlignment,
+ PageAllocator::kNoAccess));
+ if (!base_) {
+ size /= 2;
+ }
}
- base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
- nullptr, reservation_size, kVirtualMemoryCageAlignment,
- PageAllocator::kNoAccess));
if (!base_) return false;
if (use_guard_regions) {
@@ -43,9 +59,9 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
page_allocator_ = page_allocator;
size_ = size;
- data_cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
- page_allocator_, data_cage_base(), data_cage_size(),
- page_allocator_->AllocatePageSize());
+ cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
+ page_allocator_, base_, size_, page_allocator_->AllocatePageSize(),
+ base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
initialized_ = true;
@@ -54,7 +70,7 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
void V8VirtualMemoryCage::TearDown() {
if (initialized_) {
- data_cage_page_allocator_.reset();
+ cage_page_allocator_.reset();
Address reservation_base = base_;
size_t reservation_size = size_;
if (has_guard_regions_) {
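The shrink-on-failure strategy added to Initialize above, as a standalone sketch (TryReserve and the size constants are hypothetical stand-ins for the real page allocator calls and constants):

#include <cstdint>
#include <functional>

constexpr uint64_t kGB = uint64_t{1} << 30;
constexpr uint64_t kMinimumCageSize = 8 * kGB;     // illustrative minimum
constexpr uint64_t kGuardRegionSize = 32 * kGB;    // illustrative guard size

// Returns the cage size that was successfully reserved, or 0 if even the
// minimum size could not be mapped (the caller then runs without the cage).
uint64_t ReserveCage(uint64_t requested_size, bool use_guard_regions,
                     const std::function<bool(uint64_t)>& TryReserve) {
  uint64_t size = requested_size;  // assumed to be a power of two
  while (size >= kMinimumCageSize) {
    uint64_t reservation_size = size;
    if (use_guard_regions) reservation_size += 2 * kGuardRegionSize;
    if (TryReserve(reservation_size)) return size;  // reserved at this size
    size /= 2;                                      // halve and retry
  }
  return 0;
}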
diff --git a/deps/v8/src/init/vm-cage.h b/deps/v8/src/init/vm-cage.h
index 5fdd2ad6e0..d7e0728ca1 100644
--- a/deps/v8/src/init/vm-cage.h
+++ b/deps/v8/src/init/vm-cage.h
@@ -6,6 +6,7 @@
#define V8_INIT_VM_CAGE_H_
#include "include/v8-internal.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/common/globals.h"
namespace v8 {
@@ -19,48 +20,45 @@ namespace internal {
/**
* V8 Virtual Memory Cage.
*
- * When the virtual memory cage is enabled, v8 will place most of its objects
- * inside a dedicated region of virtual address space. In particular, all v8
- * heaps, inside which objects reference themselves using compressed (32-bit)
- * pointers, are located at the start of the virtual memory cage (the "pointer
- * cage") and pure memory buffers like ArrayBuffer backing stores, which
- * themselves do not contain any pointers, are located in the remaining part of
- * the cage (the "data cage"). These buffers will eventually be referenced from
- * inside the v8 heap using offsets rather than pointers. It should then be
- * assumed that an attacker is able to corrupt data arbitrarily and concurrently
- * inside the virtual memory cage.
+ * When the virtual memory cage is enabled, V8 will reserve a large region of
+ * virtual address space - the cage - and place most of its objects inside of
+ * it. This allows these objects to reference each other through offsets rather
+ * than raw pointers, which in turn makes it harder for an attacker to abuse
+ * them in an exploit.
+ *
+ * The pointer compression region, which contains most V8 objects, and inside
+ * of which compressed (32-bit) pointers are used, is located at the start of
+ * the virtual memory cage. The remainder of the cage is mostly used for memory
+ * buffers, in particular ArrayBuffer backing stores and WASM memory cages.
+ *
+ * It should be assumed that an attacker is able to corrupt data arbitrarily
+ * and concurrently inside the virtual memory cage. The heap sandbox, of which
+ * the virtual memory cage is one building block, attempts to then stop an
+ * attacker from corrupting data outside of the cage.
*
* As the embedder is responsible for providing ArrayBuffer allocators, v8
- * exposes a page allocator for the data cage to the embedder.
+ * exposes a page allocator for the virtual memory cage to the embedder.
*
- * TODO(chromium:1218005) Maybe don't call the sub-regions "cages" as well to
- * avoid confusion? In any case, the names should probably be identical to the
- * internal names for these virtual memory regions (where they are currently
- * called cages).
* TODO(chromium:1218005) come up with a coherent naming scheme for this class
* and the other "cages" in v8.
*/
-class V8VirtualMemoryCage {
+class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
public:
+ // +- ~~~ -+---------------------------------------- ~~~ -+- ~~~ -+
+ // | 32 GB | (Ideally) 1 TB | 32 GB |
+ // | | | |
+ // | Guard | 4 GB : ArrayBuffer backing stores, | Guard |
+ // | Region | V8 Heap : WASM memory buffers, and | Region |
+ // | (front) | Region : any other caged objects. | (back) |
// +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
- // | 32 GB | 4 GB | | 32 GB |
- // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
- // ^ ^ ^ ^
- // Guard Pointer Cage Data Cage Guard
- // Region (contains all (contains all ArrayBuffer and Region
- // (front) V8 heaps) WASM memory backing stores) (back)
- //
- // | base ---------------- size ------------------> |
+ // ^ ^
+ // base base + size
V8VirtualMemoryCage() = default;
V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
- bool is_initialized() const { return initialized_; }
- bool is_disabled() const { return disabled_; }
- bool is_enabled() const { return !disabled_; }
-
bool Initialize(v8::PageAllocator* page_allocator);
void Disable() {
CHECK(!initialized_);
@@ -69,16 +67,16 @@ class V8VirtualMemoryCage {
void TearDown();
+ bool is_initialized() const { return initialized_; }
+ bool is_disabled() const { return disabled_; }
+ bool is_enabled() const { return !disabled_; }
+
Address base() const { return base_; }
size_t size() const { return size_; }
- Address pointer_cage_base() const { return base_; }
- size_t pointer_cage_size() const { return kVirtualMemoryCagePointerCageSize; }
-
- Address data_cage_base() const {
- return pointer_cage_base() + pointer_cage_size();
+ base::BoundedPageAllocator* page_allocator() const {
+ return cage_page_allocator_.get();
}
- size_t data_cage_size() const { return size_ - pointer_cage_size(); }
bool Contains(Address addr) const {
return addr >= base_ && addr < base_ + size_;
@@ -88,11 +86,9 @@ class V8VirtualMemoryCage {
return Contains(reinterpret_cast<Address>(ptr));
}
- v8::PageAllocator* GetDataCagePageAllocator() {
- return data_cage_page_allocator_.get();
- }
-
private:
+ // The SequentialUnmapperTest calls the private Initialize method to create a
+ // cage without guard regions, which would otherwise consume too much memory.
friend class SequentialUnmapperTest;
// We allow tests to disable the guard regions around the cage. This is useful
@@ -106,18 +102,21 @@ class V8VirtualMemoryCage {
bool has_guard_regions_ = false;
bool initialized_ = false;
bool disabled_ = false;
+ // The PageAllocator through which the virtual memory of the cage was
+ // allocated.
v8::PageAllocator* page_allocator_ = nullptr;
- std::unique_ptr<v8::PageAllocator> data_cage_page_allocator_;
+ // The BoundedPageAllocator to allocate pages inside the cage.
+ std::unique_ptr<base::BoundedPageAllocator> cage_page_allocator_;
};
-V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
+V8_EXPORT_PRIVATE V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
#endif // V8_VIRTUAL_MEMORY_CAGE
V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
Address addr = reinterpret_cast<Address>(ptr);
- return kAllowBackingStoresOutsideDataCage || addr == kNullAddress ||
+ return kAllowBackingStoresOutsideCage || addr == kNullAddress ||
GetProcessWideVirtualMemoryCage()->Contains(addr);
#else
return true;
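A minimal sketch of the offset-based referencing idea mentioned in the class comment above (the masking scheme below is one possible realization, not V8's actual CagedPointer design):

#include <cstdint>

struct CageView {
  uintptr_t base;  // cage base address
  uintptr_t size;  // cage size, a power of two as CHECKed in Initialize()

  bool Contains(uintptr_t addr) const {
    return addr >= base && addr < base + size;
  }
  // Store pointers as offsets from the base; masking the offset on decode
  // means even a corrupted value still resolves inside [base, base + size).
  uintptr_t Encode(uintptr_t addr) const { return addr - base; }
  uintptr_t Decode(uintptr_t offset) const {
    return base + (offset & (size - 1));
  }
};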
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index e927c1cc40..9cd481e96b 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -609,9 +609,9 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
}
}
if (!selectedColumns.empty()) {
- for (const std::unique_ptr<PropertyPreview>& column :
+ for (const std::unique_ptr<PropertyPreview>& prop :
*preview->getProperties()) {
- ObjectPreview* columnPreview = column->getValuePreview(nullptr);
+ ObjectPreview* columnPreview = prop->getValuePreview(nullptr);
if (!columnPreview) continue;
// Use raw pointer here since the lifetime of each PropertyPreview is
// ensured by columnPreview. This saves an additional clone.
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 1216dc78de..da75adcd59 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -23,7 +23,7 @@ namespace v8_inspector {
namespace {
-static const int kMaxAsyncTaskStacks = 128 * 1024;
+static const int kMaxAsyncTaskStacks = 8 * 1024;
static const int kNoBreakpointId = 0;
template <typename Map>
@@ -657,7 +657,7 @@ void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
break;
case v8::debug::kAsyncFunctionSuspended: {
if (m_asyncTaskStacks.find(task) == m_asyncTaskStacks.end()) {
- asyncTaskScheduledForStack("async function", task, true);
+ asyncTaskScheduledForStack("await", task, true, true);
}
auto stackIt = m_asyncTaskStacks.find(task);
if (stackIt != m_asyncTaskStacks.end() && !stackIt->second.expired()) {
@@ -770,12 +770,13 @@ v8::MaybeLocal<v8::Value> V8Debugger::generatorScopes(
}
v8::MaybeLocal<v8::Array> V8Debugger::collectionsEntries(
- v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
+ v8::Local<v8::Context> context, v8::Local<v8::Value> collection) {
v8::Isolate* isolate = context->GetIsolate();
v8::Local<v8::Array> entries;
bool isKeyValue = false;
- if (!value->IsObject() ||
- !value.As<v8::Object>()->PreviewEntries(&isKeyValue).ToLocal(&entries)) {
+ if (!collection->IsObject() || !collection.As<v8::Object>()
+ ->PreviewEntries(&isKeyValue)
+ .ToLocal(&entries)) {
return v8::MaybeLocal<v8::Array>();
}
@@ -976,11 +977,13 @@ void V8Debugger::asyncTaskFinished(void* task) {
}
void V8Debugger::asyncTaskScheduledForStack(const String16& taskName,
- void* task, bool recurring) {
+ void* task, bool recurring,
+ bool skipTopFrame) {
if (!m_maxAsyncCallStackDepth) return;
v8::HandleScope scope(m_isolate);
std::shared_ptr<AsyncStackTrace> asyncStack = AsyncStackTrace::capture(
- this, taskName, V8StackTraceImpl::maxCallStackSizeToCapture);
+ this, taskName, V8StackTraceImpl::maxCallStackSizeToCapture,
+ skipTopFrame);
if (asyncStack) {
m_asyncTaskStacks[task] = asyncStack;
if (recurring) m_recurringTasks.insert(task);
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index fc790a9327..c39e39d6a2 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -161,7 +161,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::Local<v8::Value> value);
void asyncTaskScheduledForStack(const String16& taskName, void* task,
- bool recurring);
+ bool recurring, bool skipTopFrame = false);
void asyncTaskCanceledForStack(void* task);
void asyncTaskStartedForStack(void* task);
void asyncTaskFinishedForStack(void* task);
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 6400506610..b1b584c363 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -389,7 +389,6 @@ void V8StackTraceImpl::StackFrameIterator::next() {
while (m_currentIt == m_currentEnd && m_parent) {
const std::vector<std::shared_ptr<StackFrame>>& frames = m_parent->frames();
m_currentIt = frames.begin();
- if (m_parent->description() == "async function") ++m_currentIt;
m_currentEnd = frames.end();
m_parent = m_parent->parent().lock().get();
}
@@ -405,7 +404,8 @@ StackFrame* V8StackTraceImpl::StackFrameIterator::frame() {
// static
std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
- V8Debugger* debugger, const String16& description, int maxStackSize) {
+ V8Debugger* debugger, const String16& description, int maxStackSize,
+ bool skipTopFrame) {
DCHECK(debugger);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.stack_trace"),
@@ -419,6 +419,9 @@ std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
v8::Local<v8::StackTrace> v8StackTrace = v8::StackTrace::CurrentStackTrace(
isolate, maxStackSize, stackTraceOptions);
frames = toFramesVector(debugger, v8StackTrace, maxStackSize);
+ if (skipTopFrame && !frames.empty()) {
+ frames.erase(frames.begin());
+ }
}
std::shared_ptr<AsyncStackTrace> asyncParent;
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index aaad7ab6b3..8cefffee12 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -118,7 +118,8 @@ class AsyncStackTrace {
AsyncStackTrace& operator=(const AsyncStackTrace&) = delete;
static std::shared_ptr<AsyncStackTrace> capture(V8Debugger*,
const String16& description,
- int maxStackSize);
+ int maxStackSize,
+ bool skipTopFrame = false);
static uintptr_t store(V8Debugger* debugger,
std::shared_ptr<AsyncStackTrace> stack);
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 57eebb0c80..a89aca7209 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -1211,7 +1211,8 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
}
}
- auto iterator = v8::debug::PropertyIterator::Create(context, object);
+ auto iterator = v8::debug::PropertyIterator::Create(context, object,
+ nonIndexedPropertiesOnly);
if (!iterator) {
CHECK(tryCatch.HasCaught());
return false;
@@ -1219,14 +1220,6 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
while (!iterator->Done()) {
bool isOwn = iterator->is_own();
if (!isOwn && ownProperties) break;
- bool isIndex = iterator->is_array_index();
- if (isIndex && nonIndexedPropertiesOnly) {
- if (!iterator->Advance().FromMaybe(false)) {
- CHECK(tryCatch.HasCaught());
- return false;
- }
- continue;
- }
v8::Local<v8::Name> v8Name = iterator->name();
v8::Maybe<bool> result = set->Has(context, v8Name);
if (result.IsNothing()) return false;
@@ -1259,9 +1252,10 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
bool configurable = false;
bool isAccessorProperty = false;
- v8::TryCatch tryCatch(isolate);
+ v8::TryCatch tryCatchAttributes(isolate);
if (!iterator->attributes().To(&attributes)) {
- exceptionMirror = ValueMirror::create(context, tryCatch.Exception());
+ exceptionMirror =
+ ValueMirror::create(context, tryCatchAttributes.Exception());
} else {
if (iterator->is_native_accessor()) {
if (iterator->has_native_getter()) {
@@ -1275,10 +1269,11 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
configurable = !(attributes & v8::PropertyAttribute::DontDelete);
isAccessorProperty = getterMirror || setterMirror;
} else {
- v8::TryCatch tryCatch(isolate);
+ v8::TryCatch tryCatchDescriptor(isolate);
v8::debug::PropertyDescriptor descriptor;
if (!iterator->descriptor().To(&descriptor)) {
- exceptionMirror = ValueMirror::create(context, tryCatch.Exception());
+ exceptionMirror =
+ ValueMirror::create(context, tryCatchDescriptor.Exception());
} else {
writable = descriptor.has_writable ? descriptor.writable : false;
enumerable =
@@ -1300,7 +1295,7 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
isAccessorProperty = getterMirror || setterMirror;
if (name != "__proto__" && !getterFunction.IsEmpty() &&
getterFunction->ScriptId() == v8::UnboundScript::kNoScriptId) {
- v8::TryCatch tryCatch(isolate);
+ v8::TryCatch tryCatchFunction(isolate);
v8::Local<v8::Value> value;
if (v8::debug::CallFunctionOn(context, getterFunction, object, 0,
nullptr, true)
@@ -1324,7 +1319,7 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
configurable,
enumerable,
isOwn,
- isIndex,
+ iterator->is_array_index(),
isAccessorProperty && valueMirror,
std::move(valueMirror),
std::move(getterMirror),
@@ -1464,10 +1459,10 @@ String16 descriptionForNode(v8::Local<v8::Context> context,
}
}
if (!description.length()) {
- v8::Local<v8::Value> value;
+ v8::Local<v8::Value> constructor;
if (!object->Get(context, toV8String(isolate, "constructor"))
- .ToLocal(&value) ||
- !value->IsObject()) {
+ .ToLocal(&constructor) ||
+ !constructor->IsObject()) {
return String16();
}
if (!value.As<v8::Object>()
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 750ce0e0a2..f82a71202c 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -1670,7 +1670,7 @@ void BytecodeGenerator::VisitModuleDeclarations(Declaration::List* decls) {
top_level_builder()->record_module_variable_declaration();
}
} else {
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
Visit(decl);
}
}
@@ -1889,28 +1889,17 @@ bool IsSwitchOptimizable(SwitchStatement* stmt, SwitchInfo* info) {
}
// GCC also jump-table optimizes switch statements with 6 cases or more.
- if (static_cast<int>(info->covered_cases.size()) >=
- FLAG_switch_table_min_cases) {
- // Due to case spread will be used as the size of jump-table,
- // we need to check if it doesn't overflow by casting its
- // min and max bounds to int64_t, and calculate if the difference is less
- // than or equal to INT_MAX.
- int64_t min = static_cast<int64_t>(info->MinCase());
- int64_t max = static_cast<int64_t>(info->MaxCase());
- int64_t spread = max - min + 1;
-
- DCHECK_GT(spread, 0);
-
- // Check if casted spread is acceptable and doesn't overflow.
- if (spread <= INT_MAX &&
- IsSpreadAcceptable(static_cast<int>(spread), cases->length())) {
- return true;
- }
- }
- // Invariant- covered_cases has all cases and only cases that will go in the
- // jump table.
- info->covered_cases.clear();
- return false;
+ if (!(static_cast<int>(info->covered_cases.size()) >=
+ FLAG_switch_table_min_cases &&
+ IsSpreadAcceptable(info->MaxCase() - info->MinCase(),
+ cases->length()))) {
+ // Invariant- covered_cases has all cases and only cases that will go in the
+ // jump table.
+ info->covered_cases.clear();
+ return false;
+ } else {
+ return true;
+ }
}
} // namespace
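As a rough, standalone illustration of the heuristic the rewritten IsSwitchOptimizable applies (the density threshold below is hypothetical; the real check lives in IsSpreadAcceptable and FLAG_switch_table_min_cases):

// A switch is considered jump-table friendly when it has enough cases and the
// span between the smallest and largest case value stays small relative to
// the number of cases, since that span determines the jump table's size.
bool LooksJumpTableFriendly(int min_case, int max_case, int num_cases,
                            int min_cases_for_table /* e.g. 6 */) {
  if (num_cases < min_cases_for_table) return false;
  int spread = max_case - min_case;
  return spread <= 3 * num_cases;  // hypothetical density threshold
}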
@@ -3941,7 +3930,7 @@ void BytecodeGenerator::BuildFinalizeIteration(
ToBooleanMode::kConvertToBoolean, iterator_is_done.New());
{
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
BuildTryCatch(
// try {
// let method = iterator.return
@@ -4240,7 +4229,7 @@ void BytecodeGenerator::BuildDestructuringArrayAssignment(
void BytecodeGenerator::BuildDestructuringObjectAssignment(
ObjectLiteral* pattern, Token::Value op,
LookupHoistingMode lookup_hoisting_mode) {
- RegisterAllocationScope scope(this);
+ RegisterAllocationScope register_scope(this);
// Store the assignment value in a register.
Register value;
@@ -4283,7 +4272,7 @@ void BytecodeGenerator::BuildDestructuringObjectAssignment(
int i = 0;
for (ObjectLiteralProperty* pattern_property : *pattern->properties()) {
- RegisterAllocationScope scope(this);
+ RegisterAllocationScope inner_register_scope(this);
// The key of the pattern becomes the key into the RHS value, and the value
// of the pattern becomes the target of the assignment.
@@ -4380,12 +4369,16 @@ void BytecodeGenerator::BuildAssignment(
// Assign the value to the LHS.
switch (lhs_data.assign_type()) {
case NON_PROPERTY: {
- if (ObjectLiteral* pattern = lhs_data.expr()->AsObjectLiteral()) {
+ if (ObjectLiteral* pattern_as_object =
+ lhs_data.expr()->AsObjectLiteral()) {
// Split object literals into destructuring.
- BuildDestructuringObjectAssignment(pattern, op, lookup_hoisting_mode);
- } else if (ArrayLiteral* pattern = lhs_data.expr()->AsArrayLiteral()) {
+ BuildDestructuringObjectAssignment(pattern_as_object, op,
+ lookup_hoisting_mode);
+ } else if (ArrayLiteral* pattern_as_array =
+ lhs_data.expr()->AsArrayLiteral()) {
// Split object literals into destructuring.
- BuildDestructuringArrayAssignment(pattern, op, lookup_hoisting_mode);
+ BuildDestructuringArrayAssignment(pattern_as_array, op,
+ lookup_hoisting_mode);
} else {
DCHECK(lhs_data.expr()->IsVariableProxy());
VariableProxy* proxy = lhs_data.expr()->AsVariableProxy();
@@ -4868,7 +4861,7 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
if (iterator_type == IteratorType::kNormal) {
builder()->LoadAccumulatorWithRegister(output);
} else {
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
DCHECK_EQ(iterator_type, IteratorType::kAsync);
// If generatorKind is async, perform AsyncGeneratorYield(output.value),
// which will await `output.value` before resolving the current
@@ -6323,7 +6316,7 @@ void BytecodeGenerator::BuildIteratorClose(const IteratorRecord& iterator,
builder()->JumpIfJSReceiver(done.New());
{
- RegisterAllocationScope register_scope(this);
+ RegisterAllocationScope inner_register_scope(this);
Register return_result = register_allocator()->NewRegister();
builder()
->StoreAccumulatorInRegister(return_result)
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 49e4fad1fb..cba90c7893 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -273,7 +273,7 @@ TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
TNode<IntPtrT> InterpreterAssembler::RegisterLocationInRegisterList(
const RegListNodePair& reg_list, int index) {
- CSA_ASSERT(this,
+ CSA_DCHECK(this,
Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
TNode<IntPtrT> offset = RegisterFrameOffset(IntPtrConstant(index));
// Register indexes are negative, so subtract index from base location to get
@@ -299,10 +299,10 @@ void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
implicit_register_use_ =
implicit_register_use_ | ImplicitRegisterUse::kWriteShortStar;
- CSA_ASSERT(
+ CSA_DCHECK(
this, UintPtrGreaterThanOrEqual(opcode, UintPtrConstant(static_cast<int>(
Bytecode::kFirstShortStar))));
- CSA_ASSERT(
+ CSA_DCHECK(
this,
UintPtrLessThanOrEqual(
opcode, UintPtrConstant(static_cast<int>(Bytecode::kLastShortStar))));
@@ -1013,7 +1013,7 @@ void InterpreterAssembler::UpdateInterruptBudget(TNode<Int32T> weight,
// Assert that the weight is positive (negative weights should be implemented
// as backward updates).
- CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));
+ CSA_DCHECK(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));
Label load_budget_from_bytecode(this), load_budget_done(this);
TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
@@ -1399,7 +1399,7 @@ TNode<FixedArray> InterpreterAssembler::ExportParametersAndRegisterFile(
Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
- CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
register_count);
@@ -1471,7 +1471,7 @@ TNode<FixedArray> InterpreterAssembler::ImportRegisterFile(
Signed(ChangeUint32ToWord(formal_parameter_count));
TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
- CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
+ CSA_DCHECK(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
register_count);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index fb23f90841..5fd642fee5 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -479,7 +479,7 @@ IGNITION_HANDLER(StaLookupSlot, InterpreterAssembler) {
BIND(&strict);
{
- CSA_ASSERT(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
+ CSA_DCHECK(this, IsClearWord32<StoreLookupSlotFlags::LookupHoistingModeBit>(
bytecode_flags));
var_result =
CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value);
@@ -1269,7 +1269,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
}
BIND(&if_false);
{
- CSA_ASSERT(this, TaggedEqual(value, false_value));
+ CSA_DCHECK(this, TaggedEqual(value, false_value));
result = true_value;
Goto(&end);
}
@@ -1772,11 +1772,11 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Label if_true(this), if_false(this), end(this);
- // We juse use the final label as the default and properly CSA_ASSERT
+ // We just use the final label as the default and properly CSA_DCHECK
// that the {literal_flag} is valid here; this significantly improves
// the generated code (compared to having a default label that aborts).
unsigned const num_cases = arraysize(cases);
- CSA_ASSERT(this, Uint32LessThan(literal_flag, Int32Constant(num_cases)));
+ CSA_DCHECK(this, Uint32LessThan(literal_flag, Int32Constant(num_cases)));
Switch(literal_flag, labels[num_cases - 1], cases, labels, num_cases - 1);
BIND(&if_number);
@@ -1893,7 +1893,7 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
}
@@ -1905,7 +1905,7 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
}
@@ -1917,7 +1917,7 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
}
@@ -1929,7 +1929,7 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- CSA_ASSERT(this, IsBoolean(CAST(accumulator)));
+ CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
}
@@ -2200,7 +2200,7 @@ IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
// TNode<IntPtrT> acc_intptr = TryTaggedToInt32AsIntPtr(acc, &fall_through);
// TNode<IntPtrT> case_value = IntPtrSub(acc_intptr, case_value_base);
- CSA_ASSERT(this, TaggedIsSmi(acc));
+ CSA_DCHECK(this, TaggedIsSmi(acc));
TNode<IntPtrT> case_value = IntPtrSub(SmiUntag(CAST(acc)), case_value_base);
@@ -3024,17 +3024,17 @@ IGNITION_HANDLER(SwitchOnGeneratorState, InterpreterAssembler) {
SetContext(context);
TNode<UintPtrT> table_start = BytecodeOperandIdx(1);
- // TODO(leszeks): table_length is only used for a CSA_ASSERT, we don't
+ // TODO(leszeks): table_length is only used for a CSA_DCHECK, we don't
// actually need it otherwise.
TNode<UintPtrT> table_length = BytecodeOperandUImmWord(2);
// The state must be a Smi.
- CSA_ASSERT(this, TaggedIsSmi(state));
+ CSA_DCHECK(this, TaggedIsSmi(state));
TNode<IntPtrT> case_value = SmiUntag(state);
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrLessThan(case_value, table_length));
+ CSA_DCHECK(this, IntPtrGreaterThanOrEqual(case_value, IntPtrConstant(0)));
+ CSA_DCHECK(this, IntPtrLessThan(case_value, table_length));
USE(table_length);
TNode<WordT> entry = IntPtrAdd(table_start, case_value);
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index 1c76143d59..89a3850437 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -5,6 +5,7 @@
#include "src/json/json-parser.h"
#include "src/base/strings.h"
+#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/numbers/conversions.h"
@@ -210,19 +211,21 @@ JsonParser<Char>::JsonParser(Isolate* isolate, Handle<String> source)
original_source_(source) {
size_t start = 0;
size_t length = source->length();
- if (source->IsSlicedString()) {
+ PtrComprCageBase cage_base(isolate);
+ if (source->IsSlicedString(cage_base)) {
SlicedString string = SlicedString::cast(*source);
start = string.offset();
- String parent = string.parent();
- if (parent.IsThinString()) parent = ThinString::cast(parent).actual();
+ String parent = string.parent(cage_base);
+ if (parent.IsThinString(cage_base))
+ parent = ThinString::cast(parent).actual(cage_base);
source_ = handle(parent, isolate);
} else {
source_ = String::Flatten(isolate, source);
}
- if (StringShape(*source_).IsExternal()) {
- chars_ =
- static_cast<const Char*>(SeqExternalString::cast(*source_).GetChars());
+ if (StringShape(*source_, cage_base).IsExternal()) {
+ chars_ = static_cast<const Char*>(
+ SeqExternalString::cast(*source_).GetChars(cage_base));
chars_may_relocate_ = false;
} else {
DisallowGarbageCollection no_gc;
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index c86ab12a65..beebbc3fbf 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -315,9 +315,9 @@ bool JsonStringifier::InitializeGap(Handle<Object> gap) {
gap_[gap_length] = '\0';
}
} else if (gap->IsNumber()) {
- int num_value = DoubleToInt32(gap->Number());
- if (num_value > 0) {
- int gap_length = std::min(num_value, 10);
+ double value = std::min(gap->Number(), 10.0);
+ if (value > 0) {
+ int gap_length = DoubleToInt32(value);
gap_ = NewArray<base::uc16>(gap_length + 1);
for (int i = 0; i < gap_length; i++) gap_[i] = ' ';
gap_[gap_length] = '\0';
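A minimal sketch of why the gap computation above clamps in the double domain before narrowing (illustrative only; GapLength is not a V8 function):

#include <algorithm>

// With a gap argument such as 1e10, converting the out-of-range double to
// int32 first is what the old code did; clamping against 10.0 while the value
// is still a double keeps the narrowing conversion safely within [0, 10].
int GapLength(double gap) {
  if (!(gap > 0)) return 0;             // non-positive (or NaN) gap: no indent
  double value = std::min(gap, 10.0);   // clamp while still a double
  return static_cast<int>(value);       // now at most 10
}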
@@ -782,7 +782,8 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
map->instance_descriptors(isolate_).GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
- if (details.location() == kField && *map == object->map()) {
+ if (details.location() == PropertyLocation::kField &&
+ *map == object->map()) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
property = JSObject::FastPropertyAt(object, details.representation(),
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 0fcb2e15af..2ed7a0758f 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -102,7 +102,9 @@ namespace internal {
HR(turbofan_ticks, V8.TurboFan1KTicks, 0, 100000, 200) \
/* Backtracks observed in a single regexp interpreter execution */ \
/* The maximum of 100M backtracks takes roughly 2 seconds on my machine. */ \
- HR(regexp_backtracks, V8.RegExpBacktracks, 1, 100000000, 50)
+ HR(regexp_backtracks, V8.RegExpBacktracks, 1, 100000000, 50) \
+ /* See the CagedMemoryAllocationOutcome enum in backing-store.cc */ \
+ HR(caged_memory_allocation_outcome, V8.CagedMemoryAllocationOutcome, 0, 2, 3)
#define NESTED_TIMED_HISTOGRAM_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
diff --git a/deps/v8/src/logging/log-utils.cc b/deps/v8/src/logging/log-utils.cc
index 67a52a5873..69567b53d9 100644
--- a/deps/v8/src/logging/log-utils.cc
+++ b/deps/v8/src/logging/log-utils.cc
@@ -12,7 +12,10 @@
#include "src/base/strings.h"
#include "src/base/vector.h"
#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate-utils.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/string-inl.h"
#include "src/strings/string-stream.h"
#include "src/utils/version.h"
@@ -108,10 +111,12 @@ void Log::MessageBuilder::AppendString(String str,
if (str.is_null()) return;
DisallowGarbageCollection no_gc; // Ensure string stays valid.
+ PtrComprCageBase cage_base = GetPtrComprCageBase(str);
+ SharedStringAccessGuardIfNeeded access_guard(str);
int length = str.length();
if (length_limit) length = std::min(length, *length_limit);
for (int i = 0; i < length; i++) {
- uint16_t c = str.Get(i);
+ uint16_t c = str.Get(i, cage_base, access_guard);
if (c <= 0xFF) {
AppendCharacter(static_cast<char>(c));
} else {
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 022d0e9c57..5ef24c1535 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -103,7 +103,7 @@ static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
#if V8_ENABLE_WEBASSEMBLY
static const char* ComputeMarker(const wasm::WasmCode* code) {
switch (code->kind()) {
- case wasm::WasmCode::kFunction:
+ case wasm::WasmCode::kWasmFunction:
return code->is_liftoff() ? "" : "*";
default:
return "";
@@ -944,9 +944,10 @@ class Ticker : public sampler::Sampler {
void SampleStack(const v8::RegisterState& state) override {
if (!profiler_) return;
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::IsActive() && (!isolate->thread_manager()->IsLockedByThread(
- perThreadData_->thread_id()) ||
- perThreadData_->thread_state() != nullptr))
+ if (v8::Locker::WasEverUsed() &&
+ (!isolate->thread_manager()->IsLockedByThread(
+ perThreadData_->thread_id()) ||
+ perThreadData_->thread_state() != nullptr))
return;
TickSample sample;
sample.Init(isolate, state, TickSample::kIncludeCEntryFrame, true);
diff --git a/deps/v8/src/logging/runtime-call-stats-scope.h b/deps/v8/src/logging/runtime-call-stats-scope.h
index 1be12f06da..6b3db25ae8 100644
--- a/deps/v8/src/logging/runtime-call-stats-scope.h
+++ b/deps/v8/src/logging/runtime-call-stats-scope.h
@@ -17,8 +17,10 @@ namespace internal {
#ifdef V8_RUNTIME_CALL_STATS
-#define RCS_SCOPE(...) \
- v8::internal::RuntimeCallTimerScope rcs_timer_scope(__VA_ARGS__)
+// Make the line number part of the scope's name to avoid -Wshadow warnings.
+#define RCS_SCOPE(...) \
+ v8::internal::RuntimeCallTimerScope CONCAT(rcs_timer_scope, \
+ __LINE__)(__VA_ARGS__)
RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
RuntimeCallCounterId counter_id) {
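For readers unfamiliar with the token-pasting trick RCS_SCOPE now uses, a minimal standalone sketch (ScopedTimer, TIMER_SCOPE, and TIMER_CONCAT are illustrative stand-ins, not the V8 definitions):

#include <cstdio>

struct ScopedTimer {  // RAII stand-in for RuntimeCallTimerScope
  explicit ScopedTimer(const char* name) { std::printf("enter %s\n", name); }
  ~ScopedTimer() { std::printf("leave\n"); }
};

// Two-level expansion so that __LINE__ is expanded before ## pastes tokens.
#define TIMER_CONCAT_IMPL(a, b) a##b
#define TIMER_CONCAT(a, b) TIMER_CONCAT_IMPL(a, b)
#define TIMER_SCOPE(name) ScopedTimer TIMER_CONCAT(timer_scope_, __LINE__)(name)

void Example() {
  TIMER_SCOPE("outer");    // declares e.g. timer_scope_15
  {
    TIMER_SCOPE("inner");  // different line, so a different identifier:
  }                        // no -Wshadow warning for nested scopes
}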
diff --git a/deps/v8/src/logging/runtime-call-stats.cc b/deps/v8/src/logging/runtime-call-stats.cc
index 66e26096d0..a326c59c4c 100644
--- a/deps/v8/src/logging/runtime-call-stats.cc
+++ b/deps/v8/src/logging/runtime-call-stats.cc
@@ -25,17 +25,17 @@ base::TimeTicks RuntimeCallTimer::NowCPUTime() {
class RuntimeCallStatEntries {
public:
void Print(std::ostream& os) {
- if (total_call_count == 0) return;
- std::sort(entries.rbegin(), entries.rend());
+ if (total_call_count_ == 0) return;
+ std::sort(entries_.rbegin(), entries_.rend());
os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(12)
<< "Time" << std::setw(18) << "Count" << std::endl
<< std::string(88, '=') << std::endl;
- for (Entry& entry : entries) {
- entry.SetTotal(total_time, total_call_count);
+ for (Entry& entry : entries_) {
+ entry.SetTotal(total_time_, total_call_count_);
entry.Print(os);
}
os << std::string(88, '-') << std::endl;
- Entry("Total", total_time, total_call_count).Print(os);
+ Entry("Total", total_time_, total_call_count_).Print(os);
}
// By default, the compiler will usually inline this, which results in a large
@@ -43,10 +43,10 @@ class RuntimeCallStatEntries {
// instructions, and this function is invoked repeatedly by macros.
V8_NOINLINE void Add(RuntimeCallCounter* counter) {
if (counter->count() == 0) return;
- entries.push_back(
+ entries_.push_back(
Entry(counter->name(), counter->time(), counter->count()));
- total_time += counter->time();
- total_call_count += counter->count();
+ total_time_ += counter->time();
+ total_call_count_ += counter->count();
}
private:
@@ -94,9 +94,9 @@ class RuntimeCallStatEntries {
double count_percent_;
};
- uint64_t total_call_count = 0;
- base::TimeDelta total_time;
- std::vector<Entry> entries;
+ uint64_t total_call_count_ = 0;
+ base::TimeDelta total_time_;
+ std::vector<Entry> entries_;
};
void RuntimeCallCounter::Reset() {
diff --git a/deps/v8/src/numbers/conversions.cc b/deps/v8/src/numbers/conversions.cc
index 79497a791b..a12a3f1c72 100644
--- a/deps/v8/src/numbers/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -670,19 +670,19 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
- enum Sign { NONE, NEGATIVE, POSITIVE };
+ enum class Sign { kNone, kNegative, kPositive };
- Sign sign = NONE;
+ Sign sign = Sign::kNone;
if (*current == '+') {
// Ignore leading sign.
++current;
if (current == end) return JunkStringValue();
- sign = POSITIVE;
+ sign = Sign::kPositive;
} else if (*current == '-') {
++current;
if (current == end) return JunkStringValue();
- sign = NEGATIVE;
+ sign = Sign::kNegative;
}
static const char kInfinityString[] = "Infinity";
@@ -696,20 +696,20 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
}
DCHECK_EQ(buffer_pos, 0);
- return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
+ return (sign == Sign::kNegative) ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
+ if (current == end) return SignedZero(sign == Sign::kNegative);
leading_zero = true;
// It could be hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
- if (current == end || !isDigit(*current, 16) || sign != NONE) {
+ if (current == end || !isDigit(*current, 16) || sign != Sign::kNone) {
return JunkStringValue(); // "0x".
}
@@ -719,7 +719,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// It could be an explicit octal value.
} else if ((flags & ALLOW_OCTAL) && (*current == 'o' || *current == 'O')) {
++current;
- if (current == end || !isDigit(*current, 8) || sign != NONE) {
+ if (current == end || !isDigit(*current, 8) || sign != Sign::kNone) {
return JunkStringValue(); // "0o".
}
@@ -729,7 +729,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// It could be a binary value.
} else if ((flags & ALLOW_BINARY) && (*current == 'b' || *current == 'B')) {
++current;
- if (current == end || !isBinaryDigit(*current) || sign != NONE) {
+ if (current == end || !isBinaryDigit(*current) || sign != Sign::kNone) {
return JunkStringValue(); // "0b".
}
@@ -740,7 +740,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
+ if (current == end) return SignedZero(sign == Sign::kNegative);
}
}
@@ -785,7 +785,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
// leading zeros (if any).
while (*current == '0') {
++current;
- if (current == end) return SignedZero(sign == NEGATIVE);
+ if (current == end) return SignedZero(sign == Sign::kNegative);
exponent--; // Move this 0 into the exponent.
}
}
@@ -826,9 +826,9 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
return JunkStringValue();
}
}
- char sign = '+';
+ char exponent_sign = '+';
if (*current == '+' || *current == '-') {
- sign = static_cast<char>(*current);
+ exponent_sign = static_cast<char>(*current);
++current;
if (current == end) {
if (allow_trailing_junk) {
@@ -862,7 +862,7 @@ double InternalStringToDouble(Iterator current, EndMark end, int flags,
++current;
} while (current != end && *current >= '0' && *current <= '9');
- exponent += (sign == '-' ? -num : num);
+ exponent += (exponent_sign == '-' ? -num : num);
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
@@ -874,7 +874,8 @@ parsing_done:
if (octal) {
return InternalStringToIntDouble<3>(buffer, buffer + buffer_pos,
- sign == NEGATIVE, allow_trailing_junk);
+ sign == Sign::kNegative,
+ allow_trailing_junk);
}
if (nonzero_digit_dropped) {
@@ -887,7 +888,7 @@ parsing_done:
double converted =
Strtod(base::Vector<const char>(buffer, buffer_pos), exponent);
- return (sign == NEGATIVE) ? -converted : converted;
+ return (sign == Sign::kNegative) ? -converted : converted;
}
double StringToDouble(const char* str, int flags, double empty_string_val) {
@@ -1363,7 +1364,7 @@ char* DoubleToRadixCString(double value, int radix) {
}
char c = buffer[fraction_cursor];
// Reconstruct digit.
- int digit = c > '9' ? (c - 'a' + 10) : (c - '0');
+ digit = c > '9' ? (c - 'a' + 10) : (c - '0');
if (digit + 1 < radix) {
buffer[fraction_cursor++] = chars[digit + 1];
break;
@@ -1425,7 +1426,7 @@ base::Optional<double> TryStringToDouble(LocalIsolate* isolate,
const int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
auto buffer = std::make_unique<base::uc16[]>(max_length_for_conversion);
SharedStringAccessGuardIfNeeded access_guard(isolate);
- String::WriteToFlat(*object, buffer.get(), 0, length, access_guard);
+ String::WriteToFlat(*object, buffer.get(), 0, length, isolate, access_guard);
base::Vector<const base::uc16> v(buffer.get(), length);
return StringToDouble(v, flags);
}
diff --git a/deps/v8/src/objects/api-callbacks.tq b/deps/v8/src/objects/api-callbacks.tq
index cf94f743c4..913dd58ea6 100644
--- a/deps/v8/src/objects/api-callbacks.tq
+++ b/deps/v8/src/objects/api-callbacks.tq
@@ -16,7 +16,6 @@ bitfield struct InterceptorInfoFlags extends uint31 {
has_no_side_effect: bool: 1 bit;
}
-@generatePrint
extern class InterceptorInfo extends Struct {
getter: NonNullForeign|Zero|Undefined;
setter: NonNullForeign|Zero|Undefined;
@@ -29,7 +28,6 @@ extern class InterceptorInfo extends Struct {
flags: SmiTagged<InterceptorInfoFlags>;
}
-@generatePrint
extern class AccessCheckInfo extends Struct {
callback: Foreign|Zero|Undefined;
named_interceptor: InterceptorInfo|Zero|Undefined;
@@ -50,7 +48,6 @@ bitfield struct AccessorInfoFlags extends uint31 {
initial_attributes: PropertyAttributes: 3 bit;
}
-@generatePrint
extern class AccessorInfo extends Struct {
name: Name;
flags: SmiTagged<AccessorInfoFlags>;
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 55f51a7669..661e0759f6 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -8,7 +8,6 @@
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/struct.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/arguments.tq b/deps/v8/src/objects/arguments.tq
index c522b1db76..8f1385c9e1 100644
--- a/deps/v8/src/objects/arguments.tq
+++ b/deps/v8/src/objects/arguments.tq
@@ -88,10 +88,7 @@ macro NewSloppyArgumentsElements<Iterator: type>(
SloppyArgumentsElements{length, context, arguments, mapped_entries: ...it};
}
-@generatePrint
-extern class AliasedArgumentsEntry extends Struct {
- aliased_context_slot: Smi;
-}
+extern class AliasedArgumentsEntry extends Struct { aliased_context_slot: Smi; }
// TODO(danno): This should be a namespace {} once supported
namespace arguments {
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index 836ad3e71d..cfe355c606 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -39,24 +39,11 @@ constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif // V8_ENABLE_WEBASSEMBLY
-#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
-// MIPS64 and LOONG64 has a user space of 2^40 bytes on most processors,
-// address space limits needs to be smaller.
-constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
-#elif V8_TARGET_ARCH_RISCV64
-// RISC-V64 has a user space of 256GB on the Sv39 scheme.
-constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB
-#elif V8_TARGET_ARCH_64_BIT
-constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
-#else
-constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
-#endif
-
-std::atomic<uint64_t> reserved_address_space_{0};
+std::atomic<uint32_t> next_backing_store_id_{1};
// Allocation results are reported to UMA
//
-// See wasm_memory_allocation_result in counters.h
+// See wasm_memory_allocation_result in counters-definitions.h
enum class AllocationStatus {
kSuccess, // Succeeded on the first try
@@ -68,6 +55,19 @@ enum class AllocationStatus {
kOtherFailure // Failed for an unknown reason
};
+// Attempts to allocate memory inside the virtual memory cage currently fall
+// back to allocating memory outside of the cage if necessary. Once this
+// fallback is no longer allowed/possible, these cases will become allocation
+// failures instead. To track the frequency of such events, the outcome of
+// memory allocation attempts inside the cage is reported to UMA.
+//
+// See caged_memory_allocation_outcome in counters-definitions.h
+enum class CagedMemoryAllocationOutcome {
+ kSuccess, // Allocation succeeded inside the cage
+ kOutsideCage, // Allocation failed inside the cage but succeeded outside
+ kFailure, // Allocation failed inside and outside of the cage
+};
+
base::AddressRegion GetReservedRegion(bool has_guard_regions,
void* buffer_start,
size_t byte_capacity) {
@@ -107,6 +107,29 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
static_cast<int>(status));
}
+// When the virtual memory cage is active, this function records the outcome of
+// attempts to allocate memory inside the cage which fall back to allocating
+// memory outside of the cage. Passing a value of nullptr for the result
+// indicates that the memory could not be allocated at all.
+void RecordCagedMemoryAllocationResult(Isolate* isolate, void* result) {
+ // This metric is only meaningful when the virtual memory cage is active.
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (GetProcessWideVirtualMemoryCage()->is_initialized()) {
+ CagedMemoryAllocationOutcome outcome;
+ if (result) {
+ bool allocation_in_cage =
+ GetProcessWideVirtualMemoryCage()->Contains(result);
+ outcome = allocation_in_cage ? CagedMemoryAllocationOutcome::kSuccess
+ : CagedMemoryAllocationOutcome::kOutsideCage;
+ } else {
+ outcome = CagedMemoryAllocationOutcome::kFailure;
+ }
+ isolate->counters()->caged_memory_allocation_outcome()->AddSample(
+ static_cast<int>(outcome));
+ }
+#endif
+}
+
inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
// Double check memory is zero-initialized. Despite being DEBUG-only,
@@ -145,6 +168,34 @@ void BackingStore::Clear() {
type_specific_data_.v8_api_array_buffer_allocator = nullptr;
}
+BackingStore::BackingStore(void* buffer_start, size_t byte_length,
+ size_t max_byte_length, size_t byte_capacity,
+ SharedFlag shared, ResizableFlag resizable,
+ bool is_wasm_memory, bool free_on_destruct,
+ bool has_guard_regions, bool custom_deleter,
+ bool empty_deleter)
+ : buffer_start_(buffer_start),
+ byte_length_(byte_length),
+ max_byte_length_(max_byte_length),
+ byte_capacity_(byte_capacity),
+ id_(next_backing_store_id_.fetch_add(1)),
+ is_shared_(shared == SharedFlag::kShared),
+ is_resizable_(resizable == ResizableFlag::kResizable),
+ is_wasm_memory_(is_wasm_memory),
+ holds_shared_ptr_to_allocator_(false),
+ free_on_destruct_(free_on_destruct),
+ has_guard_regions_(has_guard_regions),
+ globally_registered_(false),
+ custom_deleter_(custom_deleter),
+ empty_deleter_(empty_deleter) {
+ // TODO(v8:11111): RAB / GSAB - Wasm integration.
+ DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_);
+ DCHECK_IMPLIES(is_resizable_, !custom_deleter_);
+ DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
+ DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_,
+ byte_length_ == max_byte_length_);
+}
+
BackingStore::~BackingStore() {
GlobalBackingStoreRegistry::Unregister(this);
@@ -154,11 +205,14 @@ BackingStore::~BackingStore() {
}
PageAllocator* page_allocator = GetPlatformPageAllocator();
+ // TODO(saelo) here and elsewhere in this file, replace with
+ // GetArrayBufferPageAllocator once the fallback to the platform page
+ // allocator is no longer allowed.
#ifdef V8_VIRTUAL_MEMORY_CAGE
if (GetProcessWideVirtualMemoryCage()->Contains(buffer_start_)) {
- page_allocator = GetPlatformDataCagePageAllocator();
+ page_allocator = GetVirtualMemoryCagePageAllocator();
} else {
- DCHECK(kAllowBackingStoresOutsideDataCage);
+ DCHECK(kAllowBackingStoresOutsideCage);
}
#endif
@@ -189,7 +243,6 @@ BackingStore::~BackingStore() {
FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
region.size());
CHECK(pages_were_freed);
- BackingStore::ReleaseReservation(reservation_size);
Clear();
return;
}
@@ -198,8 +251,6 @@ BackingStore::~BackingStore() {
if (is_resizable_) {
DCHECK(free_on_destruct_);
DCHECK(!custom_deleter_);
- size_t reservation_size =
- GetReservationSize(has_guard_regions_, byte_capacity_);
auto region =
GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
@@ -208,7 +259,6 @@ BackingStore::~BackingStore() {
FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
region.size());
CHECK(pages_were_freed);
- BackingStore::ReleaseReservation(reservation_size);
Clear();
return;
}
@@ -330,25 +380,6 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
}
#endif // V8_ENABLE_WEBASSEMBLY
-bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
- uint64_t reservation_limit = kAddressSpaceLimit;
- uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
- while (true) {
- if (old_count > reservation_limit) return false;
- if (reservation_limit - old_count < num_bytes) return false;
- if (reserved_address_space_.compare_exchange_weak(
- old_count, old_count + num_bytes, std::memory_order_acq_rel)) {
- return true;
- }
- }
-}
-
-void BackingStore::ReleaseReservation(uint64_t num_bytes) {
- uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
- USE(old_reserved);
- DCHECK_LE(num_bytes, old_reserved);
-}
-
std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
Isolate* isolate, size_t byte_length, size_t max_byte_length,
size_t page_size, size_t initial_pages, size_t maximum_pages,
@@ -391,41 +422,23 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
size_t reservation_size = GetReservationSize(guards, byte_capacity);
//--------------------------------------------------------------------------
- // 1. Enforce maximum address space reservation per engine.
- //--------------------------------------------------------------------------
- auto reserve_memory_space = [&] {
- return BackingStore::ReserveAddressSpace(reservation_size);
- };
-
- if (!gc_retry(reserve_memory_space)) {
- // Crash on out-of-memory if the correctness fuzzer is running.
- if (FLAG_correctness_fuzzer_suppressions) {
- FATAL("could not allocate wasm memory backing store");
- }
- RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
- TRACE_BS("BSw:try failed to reserve address space (size %zu)\n",
- reservation_size);
- return {};
- }
-
- //--------------------------------------------------------------------------
- // 2. Allocate pages (inaccessible by default).
+ // Allocate pages (inaccessible by default).
//--------------------------------------------------------------------------
void* allocation_base = nullptr;
PageAllocator* page_allocator = GetPlatformPageAllocator();
auto allocate_pages = [&] {
#ifdef V8_VIRTUAL_MEMORY_CAGE
- page_allocator = GetPlatformDataCagePageAllocator();
+ page_allocator = GetVirtualMemoryCagePageAllocator();
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
page_size, PageAllocator::kNoAccess);
if (allocation_base) return true;
// We currently still allow falling back to the platform page allocator if
- // the data cage page allocator fails. This will eventually be removed.
+ // the cage page allocator fails. This will eventually be removed.
// TODO(chromium:1218005) once we forbid the fallback, we should have a
- // single API, e.g. GetPlatformDataPageAllocator(), that returns the correct
+ // single API, e.g. GetArrayBufferPageAllocator(), that returns the correct
// page allocator to use here depending on whether the virtual memory cage
// is enabled or not.
- if (!kAllowBackingStoresOutsideDataCage) return false;
+ if (!kAllowBackingStoresOutsideCage) return false;
page_allocator = GetPlatformPageAllocator();
#endif
allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
@@ -434,8 +447,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
};
if (!gc_retry(allocate_pages)) {
// Page allocator could not reserve enough pages.
- BackingStore::ReleaseReservation(reservation_size);
RecordStatus(isolate, AllocationStatus::kOtherFailure);
+ RecordCagedMemoryAllocationResult(isolate, nullptr);
TRACE_BS("BSw:try failed to allocate pages\n");
return {};
}
@@ -451,8 +464,9 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
DCHECK(!guards);
byte* buffer_start = reinterpret_cast<byte*>(allocation_base);
#endif
+
//--------------------------------------------------------------------------
- // 3. Commit the initial pages (allow read/write).
+ // Commit the initial pages (allow read/write).
//--------------------------------------------------------------------------
size_t committed_byte_length = initial_pages * page_size;
auto commit_memory = [&] {
@@ -471,6 +485,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
+ RecordCagedMemoryAllocationResult(isolate, allocation_base);
ResizableFlag resizable =
is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 6c709c2b96..5ba95a2ba8 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -138,12 +138,6 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
#endif // V8_ENABLE_WEBASSEMBLY
- // TODO(wasm): address space limitations should be enforced in page alloc.
- // These methods enforce a limit on the total amount of address space,
- // which is used for both backing stores and wasm memory.
- static bool ReserveAddressSpace(uint64_t num_bytes);
- static void ReleaseReservation(uint64_t num_bytes);
-
// Returns the size of the external memory owned by this backing store.
// It is used for triggering GCs based on the external memory pressure.
size_t PerIsolateAccountingLength() {
@@ -163,44 +157,29 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
return byte_length();
}
+ uint32_t id() const { return id_; }
+
private:
friend class GlobalBackingStoreRegistry;
BackingStore(void* buffer_start, size_t byte_length, size_t max_byte_length,
size_t byte_capacity, SharedFlag shared, ResizableFlag resizable,
bool is_wasm_memory, bool free_on_destruct,
- bool has_guard_regions, bool custom_deleter, bool empty_deleter)
- : buffer_start_(buffer_start),
- byte_length_(byte_length),
- max_byte_length_(max_byte_length),
- byte_capacity_(byte_capacity),
- is_shared_(shared == SharedFlag::kShared),
- is_resizable_(resizable == ResizableFlag::kResizable),
- is_wasm_memory_(is_wasm_memory),
- holds_shared_ptr_to_allocator_(false),
- free_on_destruct_(free_on_destruct),
- has_guard_regions_(has_guard_regions),
- globally_registered_(false),
- custom_deleter_(custom_deleter),
- empty_deleter_(empty_deleter) {
- // TODO(v8:11111): RAB / GSAB - Wasm integration.
- DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_);
- DCHECK_IMPLIES(is_resizable_, !custom_deleter_);
- DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
- DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_,
- byte_length_ == max_byte_length_);
- }
+ bool has_guard_regions, bool custom_deleter, bool empty_deleter);
BackingStore(const BackingStore&) = delete;
BackingStore& operator=(const BackingStore&) = delete;
void SetAllocatorFromIsolate(Isolate* isolate);
void* buffer_start_ = nullptr;
- std::atomic<size_t> byte_length_{0};
+ std::atomic<size_t> byte_length_;
// Max byte length of the corresponding JSArrayBuffer(s).
- size_t max_byte_length_ = 0;
+ size_t max_byte_length_;
// Amount of the memory allocated
- size_t byte_capacity_ = 0;
-
+ size_t byte_capacity_;
+ // Unique ID of this backing store. Currently only used by DevTools, to
+ // identify stores used by several ArrayBuffers or WebAssembly memories
+ // (reported by the inspector as [[ArrayBufferData]] internal property)
+ uint32_t id_;
struct DeleterInfo {
v8::BackingStore::DeleterCallback callback;
void* data;
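
The constructor moved into backing-store.cc above now initializes the new id_ field from next_backing_store_id_.fetch_add(1). A minimal stand-alone sketch of that scheme, with illustrative names rather than the V8 classes: a process-wide atomic counter hands out a unique, monotonically increasing 32-bit id at construction time.

#include <atomic>
#include <cassert>
#include <cstdint>

namespace {
// Starts at 1, mirroring next_backing_store_id_ above; fetch_add returns the
// previous value, so each constructed object gets a distinct id.
std::atomic<uint32_t> next_id{1};
}  // namespace

struct StoreWithId {
  StoreWithId() : id(next_id.fetch_add(1)) {}
  uint32_t id;
};

int main() {
  StoreWithId a, b;
  assert(a.id == 1 && b.id == 2);  // unique and increasing (single-threaded here)
  return 0;
}
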
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 3f1f12bcc2..5f323aa4ec 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -81,50 +81,11 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
}
// Internal helpers.
- static MaybeHandle<MutableBigInt> BitwiseAnd(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y);
- static MaybeHandle<MutableBigInt> BitwiseXor(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y);
- static MaybeHandle<MutableBigInt> BitwiseOr(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y);
-
- static Handle<BigInt> TruncateToNBits(Isolate* isolate, int n,
- Handle<BigInt> x);
- static Handle<BigInt> TruncateAndSubFromPowerOfTwo(Isolate* isolate, int n,
- Handle<BigInt> x,
- bool result_sign);
-
static MaybeHandle<MutableBigInt> AbsoluteAddOne(
Isolate* isolate, Handle<BigIntBase> x, bool sign,
MutableBigInt result_storage = MutableBigInt());
static Handle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
Handle<BigIntBase> x);
- static MaybeHandle<MutableBigInt> AbsoluteSubOne(Isolate* isolate,
- Handle<BigIntBase> x,
- int result_length);
-
- enum ExtraDigitsHandling { kCopy, kSkip };
- enum SymmetricOp { kSymmetric, kNotSymmetric };
- static inline Handle<MutableBigInt> AbsoluteBitwiseOp(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage, ExtraDigitsHandling extra_digits,
- SymmetricOp symmetric,
- const std::function<digit_t(digit_t, digit_t)>& op);
- static Handle<MutableBigInt> AbsoluteAnd(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
- static Handle<MutableBigInt> AbsoluteAndNot(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
- static Handle<MutableBigInt> AbsoluteOr(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
- static Handle<MutableBigInt> AbsoluteXor(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage = MutableBigInt());
// Specialized helpers for shift operations.
static MaybeHandle<BigInt> LeftShiftByAbsolute(Isolate* isolate,
@@ -145,9 +106,6 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
// representation.
static uint64_t GetRawBits(BigIntBase x, bool* lossless);
- // Digit arithmetic helpers.
- static inline digit_t digit_add(digit_t a, digit_t b, digit_t* carry);
- static inline digit_t digit_sub(digit_t a, digit_t b, digit_t* borrow);
static inline bool digit_ismax(digit_t x) {
return static_cast<digit_t>(~x) == 0;
}
@@ -406,7 +364,7 @@ MaybeHandle<BigInt> BigInt::BitwiseNot(Isolate* isolate, Handle<BigInt> x) {
MaybeHandle<MutableBigInt> result;
if (x->sign()) {
// ~(-x) == ~(~(x-1)) == x-1
- result = MutableBigInt::AbsoluteSubOne(isolate, x, x->length());
+ result = MutableBigInt::AbsoluteSubOne(isolate, x);
} else {
// ~x == -x-1 == -(x+1)
result = MutableBigInt::AbsoluteAddOne(isolate, x, true);
@@ -673,96 +631,82 @@ bool BigInt::EqualToBigInt(BigInt x, BigInt y) {
MaybeHandle<BigInt> BigInt::BitwiseAnd(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseAnd(isolate, x, y));
-}
-
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y) {
- if (!x->sign() && !y->sign()) {
- return AbsoluteAnd(isolate, x, y);
- } else if (x->sign() && y->sign()) {
- int result_length = std::max(x->length(), y->length()) + 1;
- // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
- // == -(((x-1) | (y-1)) + 1)
- Handle<MutableBigInt> result;
- if (!AbsoluteSubOne(isolate, x, result_length).ToHandle(&result)) {
- return MaybeHandle<MutableBigInt>();
+ bool x_sign = x->sign();
+ bool y_sign = y->sign();
+ Handle<MutableBigInt> result;
+ if (!x_sign && !y_sign) {
+ int result_length =
+ bigint::BitwiseAnd_PosPos_ResultLength(x->length(), y->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseAnd_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
+ } else if (x_sign && y_sign) {
+ int result_length =
+ bigint::BitwiseAnd_NegNeg_ResultLength(x->length(), y->length());
+ if (!MutableBigInt::New(isolate, result_length).ToHandle(&result)) {
+ return {};
}
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- result = AbsoluteOr(isolate, result, y_1, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ bigint::BitwiseAnd_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
} else {
- DCHECK(x->sign() != y->sign());
- // Assume that x is the positive BigInt.
- if (x->sign()) std::swap(x, y);
- // x & (-y) == x & ~(y-1) == x &~ (y-1)
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- return AbsoluteAndNot(isolate, x, y_1);
+ if (x_sign) std::swap(x, y);
+ int result_length = bigint::BitwiseAnd_PosNeg_ResultLength(x->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseAnd_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
}
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::BitwiseXor(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseXor(isolate, x, y));
-}
-
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y) {
- if (!x->sign() && !y->sign()) {
- return AbsoluteXor(isolate, x, y);
- } else if (x->sign() && y->sign()) {
- int result_length = std::max(x->length(), y->length());
- // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
- Handle<MutableBigInt> result =
- AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- return AbsoluteXor(isolate, result, y_1, *result);
+ bool x_sign = x->sign();
+ bool y_sign = y->sign();
+ Handle<MutableBigInt> result;
+ if (!x_sign && !y_sign) {
+ int result_length =
+ bigint::BitwiseXor_PosPos_ResultLength(x->length(), y->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseXor_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
+ } else if (x_sign && y_sign) {
+ int result_length =
+ bigint::BitwiseXor_NegNeg_ResultLength(x->length(), y->length());
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::BitwiseXor_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
} else {
- DCHECK(x->sign() != y->sign());
- int result_length = std::max(x->length(), y->length()) + 1;
- // Assume that x is the positive BigInt.
- if (x->sign()) std::swap(x, y);
- // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
- Handle<MutableBigInt> result;
- if (!AbsoluteSubOne(isolate, y, result_length).ToHandle(&result)) {
- return MaybeHandle<MutableBigInt>();
+ if (x_sign) std::swap(x, y);
+ int result_length =
+ bigint::BitwiseXor_PosNeg_ResultLength(x->length(), y->length());
+ if (!MutableBigInt::New(isolate, result_length).ToHandle(&result)) {
+ return {};
}
- result = AbsoluteXor(isolate, result, x, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ bigint::BitwiseXor_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
}
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::BitwiseOr(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
- return MutableBigInt::MakeImmutable(MutableBigInt::BitwiseOr(isolate, x, y));
-}
-
-MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Isolate* isolate,
- Handle<BigInt> x,
- Handle<BigInt> y) {
- int result_length = std::max(x->length(), y->length());
- if (!x->sign() && !y->sign()) {
- return AbsoluteOr(isolate, x, y);
- } else if (x->sign() && y->sign()) {
- // (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1))
- // == -(((x-1) & (y-1)) + 1)
- Handle<MutableBigInt> result =
- AbsoluteSubOne(isolate, x, result_length).ToHandleChecked();
- Handle<MutableBigInt> y_1 = AbsoluteSubOne(isolate, y);
- result = AbsoluteAnd(isolate, result, y_1, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ bool x_sign = x->sign();
+ bool y_sign = y->sign();
+ int result_length = bigint::BitwiseOrResultLength(x->length(), y->length());
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ if (!x_sign && !y_sign) {
+ bigint::BitwiseOr_PosPos(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ DCHECK(!result->sign());
+ } else if (x_sign && y_sign) {
+ bigint::BitwiseOr_NegNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
} else {
- DCHECK(x->sign() != y->sign());
- // Assume that x is the positive BigInt.
- if (x->sign()) std::swap(x, y);
- // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
- Handle<MutableBigInt> result =
- AbsoluteSubOne(isolate, y, result_length).ToHandleChecked();
- result = AbsoluteAndNot(isolate, result, x, *result);
- return AbsoluteAddOne(isolate, result, true, *result);
+ if (x_sign) std::swap(x, y);
+ bigint::BitwiseOr_PosNeg(GetRWDigits(result), GetDigits(x), GetDigits(y));
+ result->set_sign(true);
}
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::Increment(Isolate* isolate, Handle<BigInt> x) {
@@ -1270,16 +1214,12 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
} else {
DCHECK(result->length() == result_length);
}
- digit_t carry = 1;
- for (int i = 0; i < input_length; i++) {
- digit_t new_carry = 0;
- result->set_digit(i, digit_add(x->digit(i), carry, &new_carry));
- carry = new_carry;
- }
- if (result_length > input_length) {
- result->set_digit(input_length, carry);
+ if (input_length == 0) {
+ result->set_digit(0, 1);
+ } else if (input_length == 1 && !will_overflow) {
+ result->set_digit(0, x->digit(0) + 1);
} else {
- DCHECK_EQ(carry, 0);
+ bigint::AddOne(GetRWDigits(result), GetDigits(x));
}
result->set_sign(sign);
return result;
@@ -1289,134 +1229,16 @@ MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteAddOne(
Handle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
Handle<BigIntBase> x) {
DCHECK(!x->is_zero());
- // Requesting a result length identical to an existing BigInt's length
- // cannot overflow the limit.
- return AbsoluteSubOne(isolate, x, x->length()).ToHandleChecked();
-}
-
-// Like the above, but you can specify that the allocated result should have
-// length {result_length}, which must be at least as large as {x->length()}.
-MaybeHandle<MutableBigInt> MutableBigInt::AbsoluteSubOne(Isolate* isolate,
- Handle<BigIntBase> x,
- int result_length) {
- DCHECK(!x->is_zero());
- DCHECK(result_length >= x->length());
- Handle<MutableBigInt> result;
- if (!New(isolate, result_length).ToHandle(&result)) {
- return MaybeHandle<MutableBigInt>();
- }
int length = x->length();
- digit_t borrow = 1;
- for (int i = 0; i < length; i++) {
- digit_t new_borrow = 0;
- result->set_digit(i, digit_sub(x->digit(i), borrow, &new_borrow));
- borrow = new_borrow;
- }
- DCHECK_EQ(borrow, 0);
- for (int i = length; i < result_length; i++) {
- result->set_digit(i, borrow);
- }
- return result;
-}
-
-// Helper for Absolute{And,AndNot,Or,Xor}.
-// Performs the given binary {op} on digit pairs of {x} and {y}; when the
-// end of the shorter of the two is reached, {extra_digits} configures how
-// remaining digits in the longer input (if {symmetric} == kSymmetric, in
-// {x} otherwise) are handled: copied to the result or ignored.
-// If {result_storage} is non-nullptr, it will be used for the result and
-// any extra digits in it will be zeroed out, otherwise a new BigInt (with
-// the same length as the longer input) will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-// Example:
-// y: [ y2 ][ y1 ][ y0 ]
-// x: [ x3 ][ x2 ][ x1 ][ x0 ]
-// | | | |
-// (kCopy) (op) (op) (op)
-// | | | |
-// v v v v
-// result_storage: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ]
-inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage, ExtraDigitsHandling extra_digits,
- SymmetricOp symmetric, const std::function<digit_t(digit_t, digit_t)>& op) {
- int x_length = x->length();
- int y_length = y->length();
- int num_pairs = y_length;
- if (x_length < y_length) {
- num_pairs = x_length;
- if (symmetric == kSymmetric) {
- std::swap(x, y);
- std::swap(x_length, y_length);
- }
- }
- DCHECK(num_pairs == std::min(x_length, y_length));
- Handle<MutableBigInt> result(result_storage, isolate);
- int result_length = extra_digits == kCopy ? x_length : num_pairs;
- if (result_storage.is_null()) {
- result = New(isolate, result_length).ToHandleChecked();
+ Handle<MutableBigInt> result = New(isolate, length).ToHandleChecked();
+ if (length == 1) {
+ result->set_digit(0, x->digit(0) - 1);
} else {
- DCHECK(result_storage.length() >= result_length);
- result_length = result_storage.length();
- }
- int i = 0;
- for (; i < num_pairs; i++) {
- result->set_digit(i, op(x->digit(i), y->digit(i)));
- }
- if (extra_digits == kCopy) {
- for (; i < x_length; i++) {
- result->set_digit(i, x->digit(i));
- }
- }
- for (; i < result_length; i++) {
- result->set_digit(i, 0);
+ bigint::SubtractOne(GetRWDigits(result), GetDigits(x));
}
return result;
}
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteAnd(Isolate* isolate,
- Handle<BigIntBase> x,
- Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kSkip, kSymmetric,
- [](digit_t a, digit_t b) { return a & b; });
-}
-
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteAndNot(
- Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kNotSymmetric,
- [](digit_t a, digit_t b) { return a & ~b; });
-}
-
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteOr(Isolate* isolate,
- Handle<BigIntBase> x,
- Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
- [](digit_t a, digit_t b) { return a | b; });
-}
-
-// If {result_storage} is non-nullptr, it will be used for the result,
-// otherwise a new BigInt of appropriate length will be allocated.
-// {result_storage} may alias {x} or {y} for in-place modification.
-Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
- Handle<BigIntBase> x,
- Handle<BigIntBase> y,
- MutableBigInt result_storage) {
- return AbsoluteBitwiseOp(isolate, x, y, result_storage, kCopy, kSymmetric,
- [](digit_t a, digit_t b) { return a ^ b; });
-}
-
MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
Handle<BigIntBase> x,
Handle<BigIntBase> y) {
@@ -1660,160 +1482,41 @@ MaybeHandle<BigInt> BigInt::FromSerializedDigits(
}
Handle<BigInt> BigInt::AsIntN(Isolate* isolate, uint64_t n, Handle<BigInt> x) {
- if (x->is_zero()) return x;
+ if (x->is_zero() || n > kMaxLengthBits) return x;
if (n == 0) return MutableBigInt::Zero(isolate);
- uint64_t needed_length = (n + kDigitBits - 1) / kDigitBits;
- uint64_t x_length = static_cast<uint64_t>(x->length());
- // If {x} has less than {n} bits, return it directly.
- if (x_length < needed_length) return x;
- DCHECK_LE(needed_length, kMaxInt);
- digit_t top_digit = x->digit(static_cast<int>(needed_length) - 1);
- digit_t compare_digit = static_cast<digit_t>(1) << ((n - 1) % kDigitBits);
- if (x_length == needed_length && top_digit < compare_digit) return x;
- // Otherwise we have to truncate (which is a no-op in the special case
- // of x == -2^(n-1)), and determine the right sign. We also might have
- // to subtract from 2^n to simulate having two's complement representation.
- // In most cases, the result's sign is x->sign() xor "(n-1)th bit present".
- // The only exception is when x is negative, has the (n-1)th bit, and all
- // its bits below (n-1) are zero. In that case, the result is the minimum
- // n-bit integer (example: asIntN(3, -12n) => -4n).
- bool has_bit = (top_digit & compare_digit) == compare_digit;
- DCHECK_LE(n, kMaxInt);
- int N = static_cast<int>(n);
- if (!has_bit) {
- return MutableBigInt::TruncateToNBits(isolate, N, x);
- }
- if (!x->sign()) {
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x, true);
- }
- // Negative numbers must subtract from 2^n, except for the special case
- // described above.
- if ((top_digit & (compare_digit - 1)) == 0) {
- for (int i = static_cast<int>(needed_length) - 2; i >= 0; i--) {
- if (x->digit(i) != 0) {
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x,
- false);
- }
- }
- // Truncation is no-op if x == -2^(n-1).
- if (x_length == needed_length && top_digit == compare_digit) return x;
- return MutableBigInt::TruncateToNBits(isolate, N, x);
- }
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(isolate, N, x, false);
+ int needed_length =
+ bigint::AsIntNResultLength(GetDigits(x), x->sign(), static_cast<int>(n));
+ if (needed_length == -1) return x;
+ Handle<MutableBigInt> result =
+ MutableBigInt::New(isolate, needed_length).ToHandleChecked();
+ bool negative = bigint::AsIntN(GetRWDigits(result), GetDigits(x), x->sign(),
+ static_cast<int>(n));
+ result->set_sign(negative);
+ return MutableBigInt::MakeImmutable(result);
}
MaybeHandle<BigInt> BigInt::AsUintN(Isolate* isolate, uint64_t n,
Handle<BigInt> x) {
if (x->is_zero()) return x;
if (n == 0) return MutableBigInt::Zero(isolate);
- // If {x} is negative, simulate two's complement representation.
+ Handle<MutableBigInt> result;
if (x->sign()) {
if (n > kMaxLengthBits) {
return ThrowBigIntTooBig<BigInt>(isolate);
}
- return MutableBigInt::TruncateAndSubFromPowerOfTwo(
- isolate, static_cast<int>(n), x, false);
- }
- // If {x} is positive and has up to {n} bits, return it directly.
- if (n >= kMaxLengthBits) return x;
- STATIC_ASSERT(kMaxLengthBits < kMaxInt - kDigitBits);
- int needed_length = static_cast<int>((n + kDigitBits - 1) / kDigitBits);
- if (x->length() < needed_length) return x;
- int bits_in_top_digit = n % kDigitBits;
- if (x->length() == needed_length) {
- if (bits_in_top_digit == 0) return x;
- digit_t top_digit = x->digit(needed_length - 1);
- if ((top_digit >> bits_in_top_digit) == 0) return x;
- }
- // Otherwise, truncate.
- DCHECK_LE(n, kMaxInt);
- return MutableBigInt::TruncateToNBits(isolate, static_cast<int>(n), x);
-}
-
-Handle<BigInt> MutableBigInt::TruncateToNBits(Isolate* isolate, int n,
- Handle<BigInt> x) {
- // Only call this when there's something to do.
- DCHECK_NE(n, 0);
- DCHECK_GT(x->length(), n / kDigitBits);
-
- int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
- DCHECK_LE(needed_digits, x->length());
- Handle<MutableBigInt> result = New(isolate, needed_digits).ToHandleChecked();
-
- // Copy all digits except the MSD.
- int last = needed_digits - 1;
- for (int i = 0; i < last; i++) {
- result->set_digit(i, x->digit(i));
- }
-
- // The MSD might contain extra bits that we don't want.
- digit_t msd = x->digit(last);
- if (n % kDigitBits != 0) {
- int drop = kDigitBits - (n % kDigitBits);
- msd = (msd << drop) >> drop;
- }
- result->set_digit(last, msd);
- result->set_sign(x->sign());
- return MakeImmutable(result);
-}
-
-// Subtracts the least significant n bits of abs(x) from 2^n.
-Handle<BigInt> MutableBigInt::TruncateAndSubFromPowerOfTwo(Isolate* isolate,
- int n,
- Handle<BigInt> x,
- bool result_sign) {
- DCHECK_NE(n, 0);
- DCHECK_LE(n, kMaxLengthBits);
-
- int needed_digits = (n + (kDigitBits - 1)) / kDigitBits;
- DCHECK_LE(needed_digits, kMaxLength); // Follows from n <= kMaxLengthBits.
- Handle<MutableBigInt> result = New(isolate, needed_digits).ToHandleChecked();
-
- // Process all digits except the MSD.
- int i = 0;
- int last = needed_digits - 1;
- int x_length = x->length();
- digit_t borrow = 0;
- // Take digits from {x} unless its length is exhausted.
- int limit = std::min(last, x_length);
- for (; i < limit; i++) {
- digit_t new_borrow = 0;
- digit_t difference = digit_sub(0, x->digit(i), &new_borrow);
- difference = digit_sub(difference, borrow, &new_borrow);
- result->set_digit(i, difference);
- borrow = new_borrow;
- }
- // Then simulate leading zeroes in {x} as needed.
- for (; i < last; i++) {
- digit_t new_borrow = 0;
- digit_t difference = digit_sub(0, borrow, &new_borrow);
- result->set_digit(i, difference);
- borrow = new_borrow;
- }
-
- // The MSD might contain extra bits that we don't want.
- digit_t msd = last < x_length ? x->digit(last) : 0;
- int msd_bits_consumed = n % kDigitBits;
- digit_t result_msd;
- if (msd_bits_consumed == 0) {
- digit_t new_borrow = 0;
- result_msd = digit_sub(0, msd, &new_borrow);
- result_msd = digit_sub(result_msd, borrow, &new_borrow);
+ int result_length = bigint::AsUintN_Neg_ResultLength(static_cast<int>(n));
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::AsUintN_Neg(GetRWDigits(result), GetDigits(x), static_cast<int>(n));
} else {
- int drop = kDigitBits - msd_bits_consumed;
- msd = (msd << drop) >> drop;
- digit_t minuend_msd = static_cast<digit_t>(1) << (kDigitBits - drop);
- digit_t new_borrow = 0;
- result_msd = digit_sub(minuend_msd, msd, &new_borrow);
- result_msd = digit_sub(result_msd, borrow, &new_borrow);
- DCHECK_EQ(new_borrow, 0); // result < 2^n.
- // If all subtracted bits were zero, we have to get rid of the
- // materialized minuend_msd again.
- result_msd &= (minuend_msd - 1);
- }
- result->set_digit(last, result_msd);
- result->set_sign(result_sign);
- return MakeImmutable(result);
+ if (n >= kMaxLengthBits) return x;
+ int result_length =
+ bigint::AsUintN_Pos_ResultLength(GetDigits(x), static_cast<int>(n));
+ if (result_length < 0) return x;
+ result = MutableBigInt::New(isolate, result_length).ToHandleChecked();
+ bigint::AsUintN_Pos(GetRWDigits(result), GetDigits(x), static_cast<int>(n));
+ }
+ DCHECK(!result->sign());
+ return MutableBigInt::MakeImmutable(result);
}
Handle<BigInt> BigInt::FromInt64(Isolate* isolate, int64_t n) {
@@ -1939,49 +1642,6 @@ uint64_t BigInt::AsUint64(bool* lossless) {
return result;
}
-// Digit arithmetic helpers.
-
-#if V8_TARGET_ARCH_32_BIT
-#define HAVE_TWODIGIT_T 1
-using twodigit_t = uint64_t;
-#elif defined(__SIZEOF_INT128__)
-// Both Clang and GCC support this on x64.
-#define HAVE_TWODIGIT_T 1
-using twodigit_t = __uint128_t;
-#endif
-
-// {carry} must point to an initialized digit_t and will either be incremented
-// by one or left alone.
-inline BigInt::digit_t MutableBigInt::digit_add(digit_t a, digit_t b,
- digit_t* carry) {
-#if HAVE_TWODIGIT_T
- twodigit_t result = static_cast<twodigit_t>(a) + static_cast<twodigit_t>(b);
- *carry += result >> kDigitBits;
- return static_cast<digit_t>(result);
-#else
- digit_t result = a + b;
- if (result < a) *carry += 1;
- return result;
-#endif
-}
-
-// {borrow} must point to an initialized digit_t and will either be incremented
-// by one or left alone.
-inline BigInt::digit_t MutableBigInt::digit_sub(digit_t a, digit_t b,
- digit_t* borrow) {
-#if HAVE_TWODIGIT_T
- twodigit_t result = static_cast<twodigit_t>(a) - static_cast<twodigit_t>(b);
- *borrow += (result >> kDigitBits) & 1;
- return static_cast<digit_t>(result);
-#else
- digit_t result = a - b;
- if (result > a) *borrow += 1;
- return static_cast<digit_t>(result);
-#endif
-}
-
-#undef HAVE_TWODIGIT_T
-
void MutableBigInt::set_64_bits(uint64_t bits) {
STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
if (kDigitBits == 64) {
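
The comments deleted from BitwiseAnd/BitwiseXor/BitwiseOr above spell out the two's-complement identities the old digit loops relied on, e.g. (-x) & (-y) == -(((x-1) | (y-1)) + 1) and x & (-y) == x & ~(y-1); the new sign-specific bigint:: entry points cover the same three sign combinations per operator. Below is a quick sanity check of those identities with ordinary machine integers — a sketch assuming two's complement (guaranteed since C++20), not the BigInt digit code itself.

#include <cassert>
#include <cstdint>

int main() {
  for (int64_t x = 1; x < 64; ++x) {
    for (int64_t y = 1; y < 64; ++y) {
      // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1)) == -(((x-1)|(y-1)) + 1)
      assert(((-x) & (-y)) == -(((x - 1) | (y - 1)) + 1));
      // x & (-y) == x & ~(y-1)
      assert((x & (-y)) == (x & ~(y - 1)));
      // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
      assert(((-x) ^ (-y)) == ((x - 1) ^ (y - 1)));
      // (-x) | (-y) == ~((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
      assert(((-x) | (-y)) == -(((x - 1) & (y - 1)) + 1));
    }
  }
  return 0;
}
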
diff --git a/deps/v8/src/objects/bigint.tq b/deps/v8/src/objects/bigint.tq
index 2d8275b2d5..3b8a4e7cc4 100644
--- a/deps/v8/src/objects/bigint.tq
+++ b/deps/v8/src/objects/bigint.tq
@@ -9,12 +9,11 @@ extern class BigIntBase extends PrimitiveHeapObject
type BigInt extends BigIntBase;
-@noVerifier
@hasSameInstanceTypeAsParent
@doNotGenerateCast
extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>';
Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
- assert(bigint::IsCanonicalized(i));
+ dcheck(bigint::IsCanonicalized(i));
return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
}
diff --git a/deps/v8/src/objects/cell.tq b/deps/v8/src/objects/cell.tq
index c318d40065..6817271760 100644
--- a/deps/v8/src/objects/cell.tq
+++ b/deps/v8/src/objects/cell.tq
@@ -2,7 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
-extern class Cell extends HeapObject {
- value: Object;
-}
+extern class Cell extends HeapObject { value: Object; }
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 81ed696cb0..d2df5395c1 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -10,7 +10,6 @@
#include "src/objects/function-kind.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/osr-optimized-code-cache.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index 83c43cc7f5..f7c0b875ef 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -61,11 +61,12 @@ type Slot<Container : type extends Context, T : type extends Object> extends
// slot has the right type already.
macro InitContextSlot<
ArgumentContext: type, AnnotatedContext: type, T: type, U: type>(
- context: ArgumentContext, index: Slot<AnnotatedContext, T>, value: U) {
+ context: ArgumentContext, index: Slot<AnnotatedContext, T>,
+ value: U): void {
// Make sure the arguments have the right type.
const context: AnnotatedContext = context;
const value: T = value;
- assert(TaggedEqual(context.elements[index], kInitialContextSlotValue));
+ dcheck(TaggedEqual(context.elements[index], kInitialContextSlotValue));
context.elements[index] = value;
}
@@ -179,17 +180,17 @@ macro LoadContextElement(c: Context, i: constexpr int32): Object {
}
@export
-macro StoreContextElement(c: Context, i: intptr, o: Object) {
+macro StoreContextElement(c: Context, i: intptr, o: Object): void {
c.elements[i] = o;
}
@export
-macro StoreContextElement(c: Context, i: Smi, o: Object) {
+macro StoreContextElement(c: Context, i: Smi, o: Object): void {
c.elements[i] = o;
}
@export
-macro StoreContextElement(c: Context, i: constexpr int32, o: Object) {
+macro StoreContextElement(c: Context, i: constexpr int32, o: Object): void {
c.elements[i] = o;
}
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index 9310824af0..6461e481f3 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_DATA_HANDLER_H_
#include "src/objects/struct.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/data-handler.tq b/deps/v8/src/objects/data-handler.tq
index 78bd31e536..46af326348 100644
--- a/deps/v8/src/objects/data-handler.tq
+++ b/deps/v8/src/objects/data-handler.tq
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// This class does not use the generated verifier, so if you change anything
+// here, please also update DataHandlerVerify in objects-debug.cc.
@abstract
extern class DataHandler extends Struct {
// [smi_handler]: A Smi which encodes a handler or Code object (we still
@@ -15,7 +17,7 @@ extern class DataHandler extends Struct {
validity_cell: Smi|Cell;
// Space for the following fields may or may not be allocated.
- @noVerifier data1: MaybeObject;
- @noVerifier data2: MaybeObject;
- @noVerifier data3: MaybeObject;
+ data1: MaybeObject;
+ data2: MaybeObject;
+ data3: MaybeObject;
}
diff --git a/deps/v8/src/objects/debug-objects.tq b/deps/v8/src/objects/debug-objects.tq
index 16e5cb43c6..d00b4abf4c 100644
--- a/deps/v8/src/objects/debug-objects.tq
+++ b/deps/v8/src/objects/debug-objects.tq
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
extern class BreakPoint extends Struct {
id: Smi;
condition: String;
}
-@generatePrint
extern class BreakPointInfo extends Struct {
// The position in the source for the break position.
source_position: Smi;
@@ -32,7 +30,6 @@ bitfield struct DebuggerHints extends uint31 {
debugging_id: int32: 20 bit;
}
-@generatePrint
extern class DebugInfo extends Struct {
shared: SharedFunctionInfo;
// Bit field containing various information collected for debugging.
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index 9bb01ffc4d..387ae8d276 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -189,7 +189,7 @@ void DescriptorArray::SetDetails(InternalIndex descriptor_number,
}
int DescriptorArray::GetFieldIndex(InternalIndex descriptor_number) {
- DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
+ DCHECK_EQ(GetDetails(descriptor_number).location(), PropertyLocation::kField);
return GetDetails(descriptor_number).field_index();
}
@@ -200,7 +200,7 @@ FieldType DescriptorArray::GetFieldType(InternalIndex descriptor_number) {
FieldType DescriptorArray::GetFieldType(PtrComprCageBase cage_base,
InternalIndex descriptor_number) {
- DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
+ DCHECK_EQ(GetDetails(descriptor_number).location(), PropertyLocation::kField);
MaybeObject wrapped_type = GetValue(cage_base, descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index a97722d4b9..9e15812cb2 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
extern class EnumCache extends Struct {
keys: FixedArray;
indices: FixedArray;
diff --git a/deps/v8/src/objects/elements-kind.h b/deps/v8/src/objects/elements-kind.h
index 0d546aba8d..4599710f8b 100644
--- a/deps/v8/src/objects/elements-kind.h
+++ b/deps/v8/src/objects/elements-kind.h
@@ -58,6 +58,23 @@ namespace internal {
V(BigUint64, rab_gsab_biguint64, RAB_GSAB_BIGUINT64, uint64_t) \
V(BigInt64, rab_gsab_bigint64, RAB_GSAB_BIGINT64, int64_t)
+// Like RAB_GSAB_TYPED_ARRAYS but has an additional parameter for
+// the corresponding non-RAB/GSAB ElementsKind.
+#define RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND(V) \
+ V(RabGsabUint8, rab_gsab_uint8, RAB_GSAB_UINT8, uint8_t, UINT8) \
+ V(RabGsabInt8, rab_gsab_int8, RAB_GSAB_INT8, int8_t, INT8) \
+ V(RabGsabUint16, rab_gsab_uint16, RAB_GSAB_UINT16, uint16_t, UINT16) \
+ V(RabGsabInt16, rab_gsab_int16, RAB_GSAB_INT16, int16_t, INT16) \
+ V(RabGsabUint32, rab_gsab_uint32, RAB_GSAB_UINT32, uint32_t, UINT32) \
+ V(RabGsabInt32, rab_gsab_int32, RAB_GSAB_INT32, int32_t, INT32) \
+ V(RabGsabFloat32, rab_gsab_float32, RAB_GSAB_FLOAT32, float, FLOAT32) \
+ V(RabGsabFloat64, rab_gsab_float64, RAB_GSAB_FLOAT64, double, FLOAT64) \
+ V(RabGsabUint8Clamped, rab_gsab_uint8_clamped, RAB_GSAB_UINT8_CLAMPED, \
+ uint8_t, UINT8_CLAMPED) \
+ V(RabGsabBigUint64, rab_gsab_biguint64, RAB_GSAB_BIGUINT64, uint64_t, \
+ BIGUINT64) \
+ V(RabGsabBigInt64, rab_gsab_bigint64, RAB_GSAB_BIGINT64, int64_t, BIGINT64)
+
enum ElementsKind : uint8_t {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index 4eedf3d6c0..85386ba639 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -3327,8 +3327,6 @@ class TypedElementsAccessor
DisallowGarbageCollection no_gc;
JSTypedArray typed_array = JSTypedArray::cast(*receiver);
- // TODO(caitp): return Just(false) here when implementing strict throwing on
- // detached views.
if (typed_array.WasDetached()) {
return Just(value->IsUndefined(isolate) && length > start_from);
}
@@ -3541,7 +3539,7 @@ class TypedElementsAccessor
CHECK(!source.WasDetached());
CHECK(!destination.WasDetached());
DCHECK_LE(start, end);
- DCHECK_LE(end, source.length());
+ DCHECK_LE(end, source.GetLength());
size_t count = end - start;
DCHECK_LE(count, destination.length());
ElementType* dest_data = static_cast<ElementType*>(destination.DataPtr());
@@ -3559,6 +3557,16 @@ class TypedElementsAccessor
}
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, NON_RAB_GSAB_TYPE) \
+ case TYPE##_ELEMENTS: { \
+ ctype* source_data = reinterpret_cast<ctype*>(source.DataPtr()) + start; \
+ CopyBetweenBackingStores<NON_RAB_GSAB_TYPE##_ELEMENTS, ctype>( \
+ source_data, dest_data, count, is_shared); \
+ break; \
+ }
+ RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
default:
UNREACHABLE();
break;
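
The elements.cc hunk above leans on the extra macro parameter added in elements-kind.h: each RAB/GSAB entry also names its plain (non-RAB/GSAB) ElementsKind, so the generated case dispatches on the RAB/GSAB kind while reusing the existing CopyBetweenBackingStores specialization for the plain kind. A stand-alone sketch of that X-macro pattern follows; the enum and list here are toy stand-ins, not V8's.

#include <cstdio>

enum Kind {
  UINT8_ELEMENTS,
  INT8_ELEMENTS,
  RAB_GSAB_UINT8_ELEMENTS,
  RAB_GSAB_INT8_ELEMENTS,
};

// Each entry carries the plain kind as an extra trailing parameter, in the
// style of RAB_GSAB_TYPED_ARRAYS_WITH_NON_RAB_GSAB_ELEMENTS_KIND above:
// V(Name, lower_name, UPPER_NAME, ctype, NON_RAB_GSAB_UPPER_NAME)
#define DEMO_RAB_GSAB_LIST(V)                                     \
  V(RabGsabUint8, rab_gsab_uint8, RAB_GSAB_UINT8, uint8_t, UINT8) \
  V(RabGsabInt8, rab_gsab_int8, RAB_GSAB_INT8, int8_t, INT8)

const char* PlainKindFor(Kind kind) {
  switch (kind) {
#define KIND_CASE(Type, type, TYPE, ctype, NON_RAB_GSAB_TYPE) \
  case TYPE##_ELEMENTS:                                       \
    return #NON_RAB_GSAB_TYPE "_ELEMENTS";
    DEMO_RAB_GSAB_LIST(KIND_CASE)
#undef KIND_CASE
    default:
      return "not a RAB/GSAB kind";
  }
}

int main() {
  // Prints "UINT8_ELEMENTS": the RAB/GSAB kind maps back to its plain kind.
  std::printf("%s\n", PlainKindFor(RAB_GSAB_UINT8_ELEMENTS));
  return 0;
}
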
diff --git a/deps/v8/src/objects/embedder-data-array-inl.h b/deps/v8/src/objects/embedder-data-array-inl.h
index 6eb1076287..d32fcfcd7b 100644
--- a/deps/v8/src/objects/embedder-data-array-inl.h
+++ b/deps/v8/src/objects/embedder-data-array-inl.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_EMBEDDER_DATA_ARRAY_INL_H_
#include "src/objects/embedder-data-array.h"
-
+#include "src/objects/heap-object-inl.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/maybe-object-inl.h"
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index f50121aa61..bc562b29a7 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -390,12 +390,13 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code,
FeedbackCell feedback_cell) {
DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
- // We should only set optimized code only when there is no valid optimized
- // code or we are tiering up.
+ // We should set optimized code only when there is no valid optimized code or
+ // we are tiering up.
DCHECK(!vector->has_optimized_code() ||
vector->optimized_code().marked_for_deoptimization() ||
(vector->optimized_code().kind() == CodeKind::TURBOPROP &&
- code->kind() == CodeKind::TURBOFAN));
+ code->kind() == CodeKind::TURBOFAN) ||
+ FLAG_stress_concurrent_inlining_attach_code);
// TODO(mythria): We could see a CompileOptimized marker here either from
// tests that use %OptimizeFunctionOnNextCall, --always-opt or because we
// re-mark the function for non-concurrent optimization after an OSR. We
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index dbf0222b84..98315ad73d 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -84,7 +84,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -92,7 +91,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 8488371744..1dfd7dac13 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -134,20 +134,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
-#if !defined(_WIN32)
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
-#endif
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
diff --git a/deps/v8/src/objects/fixed-array.tq b/deps/v8/src/objects/fixed-array.tq
index 3daa5bad49..c769b6b90d 100644
--- a/deps/v8/src/objects/fixed-array.tq
+++ b/deps/v8/src/objects/fixed-array.tq
@@ -86,10 +86,11 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
extern operator '.floats[]' macro LoadFixedDoubleArrayElement(
FixedDoubleArray, intptr): float64;
operator '[]=' macro StoreFixedDoubleArrayDirect(
- a: FixedDoubleArray, i: Smi, v: Number) {
+ a: FixedDoubleArray, i: Smi, v: Number): void {
a.floats[i] = Convert<float64_or_hole>(Convert<float64>(v));
}
-operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
+operator '[]=' macro StoreFixedArrayDirect(
+ a: FixedArray, i: Smi, v: Object): void {
a.objects[i] = v;
}
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 94fdf7eeb1..016e0c77d3 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -152,6 +152,7 @@ class HeapObject : public Object {
// during marking GC.
inline ObjectSlot RawField(int byte_offset) const;
inline MaybeObjectSlot RawMaybeWeakField(int byte_offset) const;
+ inline CodeObjectSlot RawCodeField(int byte_offset) const;
DECL_CAST(HeapObject)
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index de90f6baa1..71f349dead 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -128,6 +128,9 @@ enum InstanceType : uint16_t {
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
+ // Callable JS Functions are all JS Functions except class constructors.
+ FIRST_CALLABLE_JS_FUNCTION_TYPE = FIRST_JS_FUNCTION_TYPE,
+ LAST_CALLABLE_JS_FUNCTION_TYPE = JS_CLASS_CONSTRUCTOR_TYPE - 1,
// Boundary for testing JSReceivers that need special property lookup handling
LAST_SPECIAL_RECEIVER_TYPE = LAST_JS_SPECIAL_OBJECT_TYPE,
// Boundary case for testing JSReceivers that may have elements while having
@@ -171,6 +174,13 @@ STRING_TYPE_LIST(CHECK_STRING_RANGE)
TORQUE_ASSIGNED_INSTANCE_TYPE_LIST(CHECK_NONSTRING_RANGE)
#undef CHECK_NONSTRING_RANGE
+// classConstructor type has to be the last one in the JS Function type range.
+STATIC_ASSERT(JS_CLASS_CONSTRUCTOR_TYPE == LAST_JS_FUNCTION_TYPE);
+static_assert(JS_CLASS_CONSTRUCTOR_TYPE < FIRST_CALLABLE_JS_FUNCTION_TYPE ||
+ JS_CLASS_CONSTRUCTOR_TYPE > LAST_CALLABLE_JS_FUNCTION_TYPE,
+ "JS_CLASS_CONSTRUCTOR_TYPE must not be in the callable JS "
+ "function type range");
+
// Two ranges don't cleanly follow the inheritance hierarchy. Here we ensure
// that only expected types fall within these ranges.
// - From FIRST_JS_RECEIVER_TYPE to LAST_SPECIAL_RECEIVER_TYPE should correspond
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 99a7d62098..7a3940d300 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -23,7 +23,9 @@
#include "src/objects/js-locale-inl.h"
#include "src/objects/js-locale.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "src/objects/property-descriptor.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
@@ -181,12 +183,12 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
template <typename T>
MaybeHandle<T> New(Isolate* isolate, Handle<JSFunction> constructor,
Handle<Object> locales, Handle<Object> options,
- const char* method) {
+ const char* method_name) {
Handle<Map> map;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, map,
JSFunction::GetDerivedMap(isolate, constructor, constructor), T);
- return T::New(isolate, map, locales, options, method);
+ return T::New(isolate, map, locales, options, method_name);
}
} // namespace
@@ -427,7 +429,9 @@ std::string Intl::GetNumberingSystem(const icu::Locale& icu_locale) {
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::NumberingSystem> numbering_system(
icu::NumberingSystem::createInstance(icu_locale, status));
- if (U_SUCCESS(status)) return numbering_system->getName();
+ if (U_SUCCESS(status) && !numbering_system->isAlgorithmic()) {
+ return numbering_system->getName();
+ }
return "latn";
}
@@ -652,82 +656,6 @@ MaybeHandle<Object> Intl::LegacyUnwrapReceiver(Isolate* isolate,
return receiver;
}
-Maybe<bool> Intl::GetStringOption(Isolate* isolate, Handle<JSReceiver> options,
- const char* property,
- std::vector<const char*> values,
- const char* service,
- std::unique_ptr<char[]>* result) {
- Handle<String> property_str =
- isolate->factory()->NewStringFromAsciiChecked(property);
-
- // 1. Let value be ? Get(options, property).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetPropertyOrElement(isolate, options, property_str),
- Nothing<bool>());
-
- if (value->IsUndefined(isolate)) {
- return Just(false);
- }
-
- // 2. c. Let value be ? ToString(value).
- Handle<String> value_str;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value_str, Object::ToString(isolate, value), Nothing<bool>());
- std::unique_ptr<char[]> value_cstr = value_str->ToCString();
-
- // 2. d. if values is not undefined, then
- if (values.size() > 0) {
- // 2. d. i. If values does not contain an element equal to value,
- // throw a RangeError exception.
- for (size_t i = 0; i < values.size(); i++) {
- if (strcmp(values.at(i), value_cstr.get()) == 0) {
- // 2. e. return value
- *result = std::move(value_cstr);
- return Just(true);
- }
- }
-
- Handle<String> service_str =
- isolate->factory()->NewStringFromAsciiChecked(service);
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate,
- NewRangeError(MessageTemplate::kValueOutOfRange, value, service_str,
- property_str),
- Nothing<bool>());
- }
-
- // 2. e. return value
- *result = std::move(value_cstr);
- return Just(true);
-}
-
-V8_WARN_UNUSED_RESULT Maybe<bool> Intl::GetBoolOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* property,
- const char* service, bool* result) {
- Handle<String> property_str =
- isolate->factory()->NewStringFromAsciiChecked(property);
-
- // 1. Let value be ? Get(options, property).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetPropertyOrElement(isolate, options, property_str),
- Nothing<bool>());
-
- // 2. If value is not undefined, then
- if (!value->IsUndefined(isolate)) {
- // 2. b. i. Let value be ToBoolean(value).
- *result = value->BooleanValue(isolate);
-
- // 2. e. return value
- return Just(true);
- }
-
- return Just(false);
-}
-
namespace {
bool IsTwoLetterLanguage(const std::string& locale) {
@@ -999,9 +927,9 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
}
}
-MaybeHandle<Object> Intl::StringLocaleCompare(
+base::Optional<int> Intl::StringLocaleCompare(
Isolate* isolate, Handle<String> string1, Handle<String> string2,
- Handle<Object> locales, Handle<Object> options, const char* method) {
+ Handle<Object> locales, Handle<Object> options, const char* method_name) {
// We only cache the instance when locales is a string/undefined and
// options is undefined, as that is the only case when the specified
// side-effects of examining those arguments are unobservable.
@@ -1025,9 +953,9 @@ MaybeHandle<Object> Intl::StringLocaleCompare(
isolate);
Handle<JSCollator> collator;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, collator,
- New<JSCollator>(isolate, constructor, locales, options, method), Object);
+ MaybeHandle<JSCollator> maybe_collator =
+ New<JSCollator>(isolate, constructor, locales, options, method_name);
+ if (!maybe_collator.ToHandle(&collator)) return {};
if (can_cache) {
isolate->set_icu_object_in_cache(
Isolate::ICUObjectCacheType::kDefaultCollator, locales,
@@ -1038,26 +966,19 @@ MaybeHandle<Object> Intl::StringLocaleCompare(
}
// ecma402/#sec-collator-comparestrings
-Handle<Object> Intl::CompareStrings(Isolate* isolate,
- const icu::Collator& icu_collator,
- Handle<String> string1,
- Handle<String> string2) {
- Factory* factory = isolate->factory();
-
+int Intl::CompareStrings(Isolate* isolate, const icu::Collator& icu_collator,
+ Handle<String> string1, Handle<String> string2) {
// Early return for identical strings.
if (string1.is_identical_to(string2)) {
- return factory->NewNumberFromInt(UCollationResult::UCOL_EQUAL);
+ return UCollationResult::UCOL_EQUAL;
}
// Early return for empty strings.
if (string1->length() == 0) {
- return factory->NewNumberFromInt(string2->length() == 0
- ? UCollationResult::UCOL_EQUAL
- : UCollationResult::UCOL_LESS);
- }
- if (string2->length() == 0) {
- return factory->NewNumberFromInt(UCollationResult::UCOL_GREATER);
+ return string2->length() == 0 ? UCollationResult::UCOL_EQUAL
+ : UCollationResult::UCOL_LESS;
}
+ if (string2->length() == 0) return UCollationResult::UCOL_GREATER;
string1 = String::Flatten(isolate, string1);
string2 = String::Flatten(isolate, string2);
@@ -1070,7 +991,7 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
if (!string_piece2.empty()) {
result = icu_collator.compareUTF8(string_piece1, string_piece2, status);
DCHECK(U_SUCCESS(status));
- return factory->NewNumberFromInt(result);
+ return result;
}
}
@@ -1078,8 +999,7 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
icu::UnicodeString string_val2 = Intl::ToICUUnicodeString(isolate, string2);
result = icu_collator.compare(string_val1, string_val2, status);
DCHECK(U_SUCCESS(status));
-
- return factory->NewNumberFromInt(result);
+ return result;
}
// ecma402/#sup-properties-of-the-number-prototype-object
@@ -1087,7 +1007,7 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Handle<Object> num,
Handle<Object> locales,
Handle<Object> options,
- const char* method) {
+ const char* method_name) {
Handle<Object> numeric_obj;
ASSIGN_RETURN_ON_EXCEPTION(isolate, numeric_obj,
Object::ToNumeric(isolate, num), String);
@@ -1117,7 +1037,7 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
// 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
ASSIGN_RETURN_ON_EXCEPTION(
isolate, number_format,
- New<JSNumberFormat>(isolate, constructor, locales, options, method),
+ New<JSNumberFormat>(isolate, constructor, locales, options, method_name),
String);
if (can_cache) {
@@ -1134,55 +1054,6 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
numeric_obj);
}
-namespace {
-
-// ecma402/#sec-defaultnumberoption
-Maybe<int> DefaultNumberOption(Isolate* isolate, Handle<Object> value, int min,
- int max, int fallback, Handle<String> property) {
- // 2. Else, return fallback.
- if (value->IsUndefined()) return Just(fallback);
-
- // 1. If value is not undefined, then
- // a. Let value be ? ToNumber(value).
- Handle<Object> value_num;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value_num, Object::ToNumber(isolate, value), Nothing<int>());
- DCHECK(value_num->IsNumber());
-
- // b. If value is NaN or less than minimum or greater than maximum, throw a
- // RangeError exception.
- if (value_num->IsNaN() || value_num->Number() < min ||
- value_num->Number() > max) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate,
- NewRangeError(MessageTemplate::kPropertyValueOutOfRange, property),
- Nothing<int>());
- }
-
- // The max and min arguments are integers and the above check makes
- // sure that we are within the integer range making this double to
- // int conversion safe.
- //
- // c. Return floor(value).
- return Just(FastD2I(floor(value_num->Number())));
-}
-
-} // namespace
-
-// ecma402/#sec-getnumberoption
-Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- Handle<String> property, int min, int max,
- int fallback) {
- // 1. Let value be ? Get(options, property).
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, JSReceiver::GetProperty(isolate, options, property),
- Nothing<int>());
-
- // Return ? DefaultNumberOption(value, minimum, maximum, fallback).
- return DefaultNumberOption(isolate, value, min, max, fallback, property);
-}
-
Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
Isolate* isolate, Handle<JSReceiver> options, int mnfd_default,
int mxfd_default, bool notation_is_compact) {
@@ -1192,8 +1063,8 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
// 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits,", 1, 21,
// 1).
int mnid = 1;
- if (!Intl::GetNumberOption(isolate, options,
- factory->minimumIntegerDigits_string(), 1, 21, 1)
+ if (!GetNumberOption(isolate, options, factory->minimumIntegerDigits_string(),
+ 1, 21, 1)
.To(&mnid)) {
return Nothing<NumberFormatDigitOptions>();
}
@@ -1613,7 +1484,7 @@ MaybeHandle<JSArray> CreateArrayFromList(Isolate* isolate,
// ECMA 402 9.2.9 SupportedLocales(availableLocales, requestedLocales, options)
// https://tc39.github.io/ecma402/#sec-supportedlocales
MaybeHandle<JSObject> SupportedLocales(
- Isolate* isolate, const char* method,
+ Isolate* isolate, const char* method_name,
const std::set<std::string>& available_locales,
const std::vector<std::string>& requested_locales, Handle<Object> options) {
std::vector<std::string> supported_locales;
@@ -1622,12 +1493,12 @@ MaybeHandle<JSObject> SupportedLocales(
Handle<JSReceiver> options_obj;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, options_obj,
- Intl::CoerceOptionsToObject(isolate, options, method), JSObject);
+ CoerceOptionsToObject(isolate, options, method_name), JSObject);
// 2. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
Maybe<Intl::MatcherOption> maybe_locale_matcher =
- Intl::GetLocaleMatcher(isolate, options_obj, method);
+ Intl::GetLocaleMatcher(isolate, options_obj, method_name);
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSObject>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
@@ -1701,8 +1572,18 @@ MaybeHandle<JSArray> AvailableCurrencies(Isolate* isolate) {
std::vector<std::string> array;
while (U_SUCCESS(status) &&
(next = uenum_next(ids, nullptr, &status)) != nullptr) {
+ // Work around the issue that we do not support VEF currency code
+ // in DisplayNames by not reporting it.
+ if (strcmp(next, "VEF") == 0) continue;
array.push_back(next);
}
+ // Work around the issue that we do support the following currency codes
+ // in DisplayNames but the ICU API does not report them.
+ array.push_back("SVC");
+ array.push_back("VES");
+ array.push_back("XDR");
+ array.push_back("XSU");
+ array.push_back("ZWL");
std::sort(array.begin(), array.end());
uenum_close(ids);
return VectorToJSArray(isolate, array);
@@ -1807,7 +1688,7 @@ MaybeHandle<JSArray> Intl::SupportedValuesOf(Isolate* isolate,
// ECMA 402 Intl.*.supportedLocalesOf
MaybeHandle<JSObject> Intl::SupportedLocalesOf(
- Isolate* isolate, const char* method,
+ Isolate* isolate, const char* method_name,
const std::set<std::string>& available_locales, Handle<Object> locales,
Handle<Object> options) {
// Let availableLocales be %Collator%.[[AvailableLocales]].
@@ -1818,7 +1699,7 @@ MaybeHandle<JSObject> Intl::SupportedLocalesOf(
MAYBE_RETURN(requested_locales, MaybeHandle<JSObject>());
// Return ? SupportedLocales(availableLocales, requestedLocales, options).
- return SupportedLocales(isolate, method, available_locales,
+ return SupportedLocales(isolate, method_name, available_locales,
requested_locales.FromJust(), options);
}
@@ -1878,7 +1759,8 @@ bool Intl::IsValidNumberingSystem(const std::string& value) {
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::NumberingSystem> numbering_system(
icu::NumberingSystem::createInstanceByName(value.c_str(), status));
- return U_SUCCESS(status) && numbering_system.get() != nullptr;
+ return U_SUCCESS(status) && numbering_system.get() != nullptr &&
+ !numbering_system->isAlgorithmic();
}
namespace {
@@ -2229,20 +2111,20 @@ base::TimezoneCache* Intl::CreateTimeZoneCache() {
Maybe<Intl::MatcherOption> Intl::GetLocaleMatcher(Isolate* isolate,
Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<Intl::MatcherOption>(
- isolate, options, "localeMatcher", method, {"best fit", "lookup"},
+ const char* method_name) {
+ return GetStringOption<Intl::MatcherOption>(
+ isolate, options, "localeMatcher", method_name, {"best fit", "lookup"},
{Intl::MatcherOption::kBestFit, Intl::MatcherOption::kLookup},
Intl::MatcherOption::kBestFit);
}
Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate,
Handle<JSReceiver> options,
- const char* method,
+ const char* method_name,
std::unique_ptr<char[]>* result) {
const std::vector<const char*> empty_values = {};
- Maybe<bool> maybe = Intl::GetStringOption(isolate, options, "numberingSystem",
- empty_values, method, result);
+ Maybe<bool> maybe = GetStringOption(isolate, options, "numberingSystem",
+ empty_values, method_name, result);
MAYBE_RETURN(maybe, Nothing<bool>());
if (maybe.FromJust() && *result != nullptr) {
if (!IsWellFormedNumberingSystem(result->get())) {
@@ -2351,41 +2233,6 @@ MaybeHandle<String> Intl::FormattedToString(
return Intl::ToString(isolate, result);
}
-// ecma402/#sec-getoptionsobject
-MaybeHandle<JSReceiver> Intl::GetOptionsObject(Isolate* isolate,
- Handle<Object> options,
- const char* service) {
- // 1. If options is undefined, then
- if (options->IsUndefined(isolate)) {
- // a. Return ! ObjectCreate(null).
- return isolate->factory()->NewJSObjectWithNullProto();
- }
- // 2. If Type(options) is Object, then
- if (options->IsJSReceiver()) {
- // a. Return options.
- return Handle<JSReceiver>::cast(options);
- }
- // 3. Throw a TypeError exception.
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidArgument),
- JSReceiver);
-}
-
-// ecma402/#sec-coerceoptionstoobject
-MaybeHandle<JSReceiver> Intl::CoerceOptionsToObject(Isolate* isolate,
- Handle<Object> options,
- const char* service) {
- // 1. If options is undefined, then
- if (options->IsUndefined(isolate)) {
- // a. Return ! ObjectCreate(null).
- return isolate->factory()->NewJSObjectWithNullProto();
- }
- // 2. Return ? ToObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
- Object::ToObject(isolate, options, service),
- JSReceiver);
- return Handle<JSReceiver>::cast(options);
-}
-
MaybeHandle<JSArray> Intl::ToJSArray(
Isolate* isolate, const char* unicode_key,
icu::StringEnumeration* enumeration,
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 122ca4b746..a696e09410 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -62,76 +62,10 @@ class Intl {
static std::string GetNumberingSystem(const icu::Locale& icu_locale);
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> SupportedLocalesOf(
- Isolate* isolate, const char* method,
+ Isolate* isolate, const char* method_name,
const std::set<std::string>& available_locales, Handle<Object> locales_in,
Handle<Object> options_in);
- // ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
- // ecma402/#sec-getoption
- //
- // This is specialized for the case when type is string.
- //
- // Instead of passing undefined for the values argument as the spec
- // defines, pass in an empty vector.
- //
- // Returns true if options object has the property and stores the
- // result in value. Returns false if the value is not found. The
- // caller is required to use fallback value appropriately in this
- // case.
- //
- // service is a string denoting the type of Intl object; used when
- // printing the error message.
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> GetStringOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* property,
- std::vector<const char*> values, const char* service,
- std::unique_ptr<char[]>* result);
-
- // A helper template to get string from option into a enum.
- // The enum in the enum_values is the corresponding value to the strings
- // in the str_values. If the option does not contains name,
- // default_value will be return.
- template <typename T>
- V8_WARN_UNUSED_RESULT static Maybe<T> GetStringOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* name,
- const char* method, const std::vector<const char*>& str_values,
- const std::vector<T>& enum_values, T default_value) {
- DCHECK_EQ(str_values.size(), enum_values.size());
- std::unique_ptr<char[]> cstr;
- Maybe<bool> found = Intl::GetStringOption(isolate, options, name,
- str_values, method, &cstr);
- MAYBE_RETURN(found, Nothing<T>());
- if (found.FromJust()) {
- DCHECK_NOT_NULL(cstr.get());
- for (size_t i = 0; i < str_values.size(); i++) {
- if (strcmp(cstr.get(), str_values[i]) == 0) {
- return Just(enum_values[i]);
- }
- }
- UNREACHABLE();
- }
- return Just(default_value);
- }
-
- // ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
- // ecma402/#sec-getoption
- //
- // This is specialized for the case when type is boolean.
- //
- // Returns true if options object has the property and stores the
- // result in value. Returns false if the value is not found. The
- // caller is required to use fallback value appropriately in this
- // case.
- //
- // service is a string denoting the type of Intl object; used when
- // printing the error message.
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool> GetBoolOption(
- Isolate* isolate, Handle<JSReceiver> options, const char* property,
- const char* service, bool* result);
-
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
- Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
- int min, int max, int fallback);
-
// https://tc39.github.io/ecma402/#sec-canonicalizelocalelist
// {only_return_one_result} is an optimization for callers that only
// care about the first result.
@@ -158,18 +92,19 @@ class Intl {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ConvertToLower(
Isolate* isolate, Handle<String> s);
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> StringLocaleCompare(
+ V8_WARN_UNUSED_RESULT static base::Optional<int> StringLocaleCompare(
Isolate* isolate, Handle<String> s1, Handle<String> s2,
- Handle<Object> locales, Handle<Object> options, const char* method);
+ Handle<Object> locales, Handle<Object> options, const char* method_name);
- V8_WARN_UNUSED_RESULT static Handle<Object> CompareStrings(
- Isolate* isolate, const icu::Collator& collator, Handle<String> s1,
- Handle<String> s2);
+ V8_WARN_UNUSED_RESULT static int CompareStrings(Isolate* isolate,
+ const icu::Collator& collator,
+ Handle<String> s1,
+ Handle<String> s2);
// ecma402/#sup-properties-of-the-number-prototype-object
V8_WARN_UNUSED_RESULT static MaybeHandle<String> NumberToLocaleString(
Isolate* isolate, Handle<Object> num, Handle<Object> locales,
- Handle<Object> options, const char* method);
+ Handle<Object> options, const char* method_name);
// ecma402/#sec-setnfdigitoptions
struct NumberFormatDigitOptions {
@@ -237,11 +172,11 @@ class Intl {
// Shared function to read the "localeMatcher" option.
V8_WARN_UNUSED_RESULT static Maybe<MatcherOption> GetLocaleMatcher(
- Isolate* isolate, Handle<JSReceiver> options, const char* method);
+ Isolate* isolate, Handle<JSReceiver> options, const char* method_name);
// Shared function to read the "numberingSystem" option.
V8_WARN_UNUSED_RESULT static Maybe<bool> GetNumberingSystem(
- Isolate* isolate, Handle<JSReceiver> options, const char* method,
+ Isolate* isolate, Handle<JSReceiver> options, const char* method_name,
std::unique_ptr<char[]>* result);
// Check the calendar is valid or not for that locale.
@@ -336,14 +271,6 @@ class Intl {
static const std::set<std::string>& GetAvailableLocalesForDateFormat();
- // ecma402/#sec-getoptionsobject
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> GetOptionsObject(
- Isolate* isolate, Handle<Object> options, const char* service);
-
- // ecma402/#sec-coerceoptionstoobject
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> CoerceOptionsToObject(
- Isolate* isolate, Handle<Object> options, const char* service);
-
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> ToJSArray(
Isolate* isolate, const char* unicode_key,
icu::StringEnumeration* enumeration,
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index e1de03dcf9..67ae6b7877 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -42,7 +42,7 @@ DEF_GETTER(JSArrayBuffer, backing_store, void*) {
return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
}
-void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
+void JSArrayBuffer::set_backing_store(void* value) {
DCHECK(IsValidBackingStorePointer(value));
WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
}
@@ -195,13 +195,13 @@ size_t JSTypedArray::GetLengthOrOutOfBounds(bool& out_of_bounds) const {
if (WasDetached()) return 0;
if (is_length_tracking()) {
if (is_backed_by_rab()) {
- if (byte_offset() >= buffer().byte_length()) {
+ if (byte_offset() > buffer().byte_length()) {
out_of_bounds = true;
return 0;
}
return (buffer().byte_length() - byte_offset()) / element_size();
}
- if (byte_offset() >=
+ if (byte_offset() >
buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst)) {
out_of_bounds = true;
return 0;
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index bbe635ee2a..07b37dd7f5 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -56,7 +56,7 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
}
set_extension(nullptr);
if (!backing_store) {
- set_backing_store(GetIsolate(), nullptr);
+ set_backing_store(nullptr);
set_byte_length(0);
set_max_byte_length(0);
} else {
@@ -76,8 +76,7 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
!backing_store->is_wasm_memory() && !backing_store->is_resizable(),
backing_store->byte_length() == backing_store->max_byte_length());
DCHECK(!was_detached());
- Isolate* isolate = GetIsolate();
- set_backing_store(isolate, backing_store->buffer_start());
+ set_backing_store(backing_store->buffer_start());
if (is_shared() && is_resizable()) {
// GSABs need to read their byte_length from the BackingStore. Maintain the
// invariant that their byte_length field is always 0.
@@ -88,12 +87,11 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
set_max_byte_length(backing_store->max_byte_length());
if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true);
- Heap* heap = isolate->heap();
ArrayBufferExtension* extension = EnsureExtension();
size_t bytes = backing_store->PerIsolateAccountingLength();
extension->set_accounting_length(bytes);
extension->set_backing_store(std::move(backing_store));
- heap->AppendArrayBufferExtension(*this, extension);
+ GetIsolate()->heap()->AppendArrayBufferExtension(*this, extension);
}
void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
@@ -122,14 +120,25 @@ void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
DCHECK(!is_shared());
DCHECK(!is_asmjs_memory());
- set_backing_store(isolate, nullptr);
+ set_backing_store(nullptr);
set_byte_length(0);
set_was_detached(true);
}
-std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() {
- if (!extension()) return nullptr;
- return extension()->backing_store();
+std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() const {
+ if (!extension()) return nullptr;
+ return extension()->backing_store();
+}
+
+size_t JSArrayBuffer::GetByteLength() const {
+ if V8_UNLIKELY (is_shared() && is_resizable()) {
+ // Invariant: byte_length for GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ DCHECK_EQ(0, byte_length());
+
+ return GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ }
+ return byte_length();
}
ArrayBufferExtension* JSArrayBuffer::EnsureExtension() {
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index e5a68f3923..dadc85659b 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -38,7 +38,7 @@ class JSArrayBuffer
// [backing_store]: backing memory for this array
DECL_GETTER(backing_store, void*)
- inline void set_backing_store(Isolate* isolate, void* value);
+ inline void set_backing_store(void* value);
// [extension]: extension object used for GC
DECL_PRIMITIVE_ACCESSORS(extension, ArrayBufferExtension*)
@@ -104,7 +104,9 @@ class JSArrayBuffer
// Get a reference to backing store of this array buffer, if there is a
// backing store. Returns nullptr if there is no backing store (e.g. detached
// or a zero-length array buffer).
- std::shared_ptr<BackingStore> GetBackingStore();
+ std::shared_ptr<BackingStore> GetBackingStore() const;
+
+ size_t GetByteLength() const;
// Allocates an ArrayBufferExtension for this array buffer, unless it is
// already associated with an extension.
@@ -160,52 +162,27 @@ class JSArrayBuffer
// extension-object. The GC periodically iterates all extensions concurrently
// and frees unmarked ones.
// https://docs.google.com/document/d/1-ZrLdlFX1nXT3z-FAgLbKal1gI8Auiaya_My-a0UJ28/edit
-class ArrayBufferExtension : public Malloced {
- enum class GcState : uint8_t { Dead = 0, Copied, Promoted };
-
- std::atomic<bool> marked_;
- std::atomic<GcState> young_gc_state_;
- std::shared_ptr<BackingStore> backing_store_;
- ArrayBufferExtension* next_;
- std::atomic<size_t> accounting_length_;
-
- GcState young_gc_state() {
- return young_gc_state_.load(std::memory_order_relaxed);
- }
-
- void set_young_gc_state(GcState value) {
- young_gc_state_.store(value, std::memory_order_relaxed);
- }
-
+class ArrayBufferExtension final : public Malloced {
public:
- ArrayBufferExtension()
- : marked_(false),
- young_gc_state_(GcState::Dead),
- backing_store_(std::shared_ptr<BackingStore>()),
- next_(nullptr),
- accounting_length_(0) {}
+ ArrayBufferExtension() : backing_store_(std::shared_ptr<BackingStore>()) {}
explicit ArrayBufferExtension(std::shared_ptr<BackingStore> backing_store)
- : marked_(false),
- young_gc_state_(GcState::Dead),
- backing_store_(backing_store),
- next_(nullptr),
- accounting_length_(0) {}
+ : backing_store_(backing_store) {}
void Mark() { marked_.store(true, std::memory_order_relaxed); }
void Unmark() { marked_.store(false, std::memory_order_relaxed); }
- bool IsMarked() { return marked_.load(std::memory_order_relaxed); }
+ bool IsMarked() const { return marked_.load(std::memory_order_relaxed); }
void YoungMark() { set_young_gc_state(GcState::Copied); }
void YoungMarkPromoted() { set_young_gc_state(GcState::Promoted); }
void YoungUnmark() { set_young_gc_state(GcState::Dead); }
- bool IsYoungMarked() { return young_gc_state() != GcState::Dead; }
+ bool IsYoungMarked() const { return young_gc_state() != GcState::Dead; }
- bool IsYoungPromoted() { return young_gc_state() == GcState::Promoted; }
+ bool IsYoungPromoted() const { return young_gc_state() == GcState::Promoted; }
std::shared_ptr<BackingStore> backing_store() { return backing_store_; }
BackingStore* backing_store_raw() { return backing_store_.get(); }
- size_t accounting_length() {
+ size_t accounting_length() const {
return accounting_length_.load(std::memory_order_relaxed);
}
@@ -227,8 +204,25 @@ class ArrayBufferExtension : public Malloced {
void reset_backing_store() { backing_store_.reset(); }
- ArrayBufferExtension* next() { return next_; }
+ ArrayBufferExtension* next() const { return next_; }
void set_next(ArrayBufferExtension* extension) { next_ = extension; }
+
+ private:
+ enum class GcState : uint8_t { Dead = 0, Copied, Promoted };
+
+ std::atomic<bool> marked_{false};
+ std::atomic<GcState> young_gc_state_{GcState::Dead};
+ std::shared_ptr<BackingStore> backing_store_;
+ ArrayBufferExtension* next_ = nullptr;
+ std::atomic<size_t> accounting_length_{0};
+
+ GcState young_gc_state() const {
+ return young_gc_state_.load(std::memory_order_relaxed);
+ }
+
+ void set_young_gc_state(GcState value) {
+ young_gc_state_.store(value, std::memory_order_relaxed);
+ }
};
class JSArrayBufferView
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 4d725a0905..2cd2e3f309 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -8,7 +8,6 @@
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-array.tq b/deps/v8/src/objects/js-array.tq
index e9f7d86c44..7e6103293e 100644
--- a/deps/v8/src/objects/js-array.tq
+++ b/deps/v8/src/objects/js-array.tq
@@ -187,7 +187,7 @@ struct FastJSArrayWitness {
return this.unstable;
}
- macro Recheck() labels CastError {
+ macro Recheck(): void labels CastError {
if (this.stable.map != this.map) goto CastError;
// We don't need to check elements kind or whether the prototype
// has changed away from the default JSArray prototype, because
@@ -210,7 +210,7 @@ struct FastJSArrayWitness {
}
}
- macro StoreHole(k: Smi) {
+ macro StoreHole(k: Smi): void {
if (this.hasDoubles) {
const elements = Cast<FixedDoubleArray>(this.unstable.elements)
otherwise unreachable;
@@ -230,19 +230,19 @@ struct FastJSArrayWitness {
}
}
- macro EnsureArrayPushable(implicit context: Context)() labels Failed {
+ macro EnsureArrayPushable(implicit context: Context)(): void labels Failed {
EnsureArrayPushable(this.map) otherwise Failed;
array::EnsureWriteableFastElements(this.unstable);
this.arrayIsPushable = true;
}
- macro ChangeLength(newLength: Smi) {
- assert(this.arrayIsPushable);
+ macro ChangeLength(newLength: Smi): void {
+ dcheck(this.arrayIsPushable);
this.unstable.length = newLength;
}
- macro Push(value: JSAny) labels Failed {
- assert(this.arrayIsPushable);
+ macro Push(value: JSAny): void labels Failed {
+ dcheck(this.arrayIsPushable);
if (this.hasDoubles) {
BuildAppendJSArray(
ElementsKind::HOLEY_DOUBLE_ELEMENTS, this.unstable, value)
@@ -251,7 +251,7 @@ struct FastJSArrayWitness {
BuildAppendJSArray(ElementsKind::HOLEY_SMI_ELEMENTS, this.unstable, value)
otherwise Failed;
} else {
- assert(
+ dcheck(
this.map.elements_kind == ElementsKind::HOLEY_ELEMENTS ||
this.map.elements_kind == ElementsKind::PACKED_ELEMENTS);
BuildAppendJSArray(ElementsKind::HOLEY_ELEMENTS, this.unstable, value)
@@ -259,8 +259,8 @@ struct FastJSArrayWitness {
}
}
- macro MoveElements(dst: intptr, src: intptr, length: intptr) {
- assert(this.arrayIsPushable);
+ macro MoveElements(dst: intptr, src: intptr, length: intptr): void {
+ dcheck(this.arrayIsPushable);
if (this.hasDoubles) {
const elements: FixedDoubleArray =
Cast<FixedDoubleArray>(this.unstable.elements)
@@ -303,7 +303,7 @@ struct FastJSArrayForReadWitness {
return this.unstable;
}
- macro Recheck() labels CastError {
+ macro Recheck(): void labels CastError {
if (this.stable.map != this.map) goto CastError;
// We don't need to check elements kind or whether the prototype
// has changed away from the default JSArray prototype, because
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index d8794b02ce..c9558d5c78 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -10,6 +10,8 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-break-iterator-inl.h"
+#include "src/objects/managed-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/brkiter.h"
namespace v8 {
@@ -56,7 +58,7 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
Intl::ResolvedLocale r = maybe_resolve_locale.FromJust();
// Extract type from options
- Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ Maybe<Type> maybe_type = GetStringOption<Type>(
isolate, options, "type", service,
{"word", "character", "sentence", "line"},
{Type::WORD, Type::CHARACTER, Type::SENTENCE, Type::LINE}, Type::WORD);
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index 92104084ad..6fc02c856b 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -15,7 +15,6 @@
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
#include "src/objects/objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index be3541f29d..dea0ce0422 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -11,7 +11,9 @@
#include "src/execution/isolate.h"
#include "src/objects/js-collator-inl.h"
#include "src/objects/js-locale.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/coll.h"
#include "unicode/locid.h"
#include "unicode/strenum.h"
@@ -42,9 +44,9 @@ enum class Sensitivity {
enum class CaseFirst { kUndefined, kUpper, kLower, kFalse };
Maybe<CaseFirst> GetCaseFirst(Isolate* isolate, Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<CaseFirst>(
- isolate, options, "caseFirst", method, {"upper", "lower", "false"},
+ const char* method_name) {
+ return GetStringOption<CaseFirst>(
+ isolate, options, "caseFirst", method_name, {"upper", "lower", "false"},
{CaseFirst::kUpper, CaseFirst::kLower, CaseFirst::kFalse},
CaseFirst::kUndefined);
}
@@ -286,12 +288,12 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 2. Set options to ? CoerceOptionsToObject(options).
Handle<JSReceiver> options;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, options_obj, service), JSCollator);
+ isolate, options, CoerceOptionsToObject(isolate, options_obj, service),
+ JSCollator);
// 4. Let usage be ? GetOption(options, "usage", "string", « "sort",
// "search" », "sort").
- Maybe<Usage> maybe_usage = Intl::GetStringOption<Usage>(
+ Maybe<Usage> maybe_usage = GetStringOption<Usage>(
isolate, options, "usage", service, {"sort", "search"},
{Usage::SORT, Usage::SEARCH}, Usage::SORT);
MAYBE_RETURN(maybe_usage, MaybeHandle<JSCollator>());
@@ -309,7 +311,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// *undefined*, *undefined*).
std::unique_ptr<char[]> collation_str = nullptr;
const std::vector<const char*> empty_values = {};
- Maybe<bool> maybe_collation = Intl::GetStringOption(
+ Maybe<bool> maybe_collation = GetStringOption(
isolate, options, "collation", empty_values, service, &collation_str);
MAYBE_RETURN(maybe_collation, MaybeHandle<JSCollator>());
// x. If _collation_ is not *undefined*, then
@@ -334,13 +336,13 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// a. Let numeric be ! ToString(numeric).
//
// Note: We omit the ToString(numeric) operation as it's not
- // observable. Intl::GetBoolOption returns a Boolean and
+ // observable. GetBoolOption returns a Boolean and
// ToString(Boolean) is not side-effecting.
//
// 13. Set opt.[[kn]] to numeric.
bool numeric;
Maybe<bool> found_numeric =
- Intl::GetBoolOption(isolate, options, "numeric", service, &numeric);
+ GetBoolOption(isolate, options, "numeric", service, &numeric);
MAYBE_RETURN(found_numeric, MaybeHandle<JSCollator>());
// 14. Let caseFirst be ? GetOption(options, "caseFirst", "string",
@@ -477,12 +479,12 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 24. Let sensitivity be ? GetOption(options, "sensitivity",
// "string", « "base", "accent", "case", "variant" », undefined).
- Maybe<Sensitivity> maybe_sensitivity = Intl::GetStringOption<Sensitivity>(
- isolate, options, "sensitivity", service,
- {"base", "accent", "case", "variant"},
- {Sensitivity::kBase, Sensitivity::kAccent, Sensitivity::kCase,
- Sensitivity::kVariant},
- Sensitivity::kUndefined);
+ Maybe<Sensitivity> maybe_sensitivity =
+ GetStringOption<Sensitivity>(isolate, options, "sensitivity", service,
+ {"base", "accent", "case", "variant"},
+ {Sensitivity::kBase, Sensitivity::kAccent,
+ Sensitivity::kCase, Sensitivity::kVariant},
+ Sensitivity::kUndefined);
MAYBE_RETURN(maybe_sensitivity, MaybeHandle<JSCollator>());
Sensitivity sensitivity = maybe_sensitivity.FromJust();
@@ -518,7 +520,7 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// 27.Let ignorePunctuation be ? GetOption(options,
// "ignorePunctuation", "boolean", undefined, false).
bool ignore_punctuation;
- Maybe<bool> found_ignore_punctuation = Intl::GetBoolOption(
+ Maybe<bool> found_ignore_punctuation = GetBoolOption(
isolate, options, "ignorePunctuation", service, &ignore_punctuation);
MAYBE_RETURN(found_ignore_punctuation, MaybeHandle<JSCollator>());
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 6e24da0589..8c93a8eeb6 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -28,7 +28,7 @@ ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
ACCESSORS(JSDateTimeFormat, icu_date_interval_format,
Managed<icu::DateIntervalFormat>, kIcuDateIntervalFormatOffset)
-BOOL_ACCESSORS(JSDateTimeFormat, flags, iso8601, Iso8601Bit::kShift)
+BOOL_ACCESSORS(JSDateTimeFormat, flags, alt_calendar, AltCalendarBit::kShift)
inline void JSDateTimeFormat::set_hour_cycle(HourCycle hour_cycle) {
int hints = flags();
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 868b0a3be2..2258a1ffdf 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -20,7 +20,8 @@
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-date-time-format-inl.h"
-
+#include "src/objects/managed-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/calendar.h"
#include "unicode/dtitvfmt.h"
#include "unicode/dtptngen.h"
@@ -76,9 +77,9 @@ JSDateTimeFormat::HourCycle ToHourCycle(UDateFormatHourCycle hc) {
Maybe<JSDateTimeFormat::HourCycle> GetHourCycle(Isolate* isolate,
Handle<JSReceiver> options,
- const char* method) {
- return Intl::GetStringOption<JSDateTimeFormat::HourCycle>(
- isolate, options, "hourCycle", method, {"h11", "h12", "h23", "h24"},
+ const char* method_name) {
+ return GetStringOption<JSDateTimeFormat::HourCycle>(
+ isolate, options, "hourCycle", method_name, {"h11", "h12", "h23", "h24"},
{JSDateTimeFormat::HourCycle::kH11, JSDateTimeFormat::HourCycle::kH12,
JSDateTimeFormat::HourCycle::kH23, JSDateTimeFormat::HourCycle::kH24},
JSDateTimeFormat::HourCycle::kUndefined);
@@ -525,13 +526,17 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
// and
// http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
if (calendar_str == "gregorian") {
- if (date_time_format->iso8601()) {
+ if (date_time_format->alt_calendar()) {
calendar_str = "iso8601";
} else {
calendar_str = "gregory";
}
} else if (calendar_str == "ethiopic-amete-alem") {
calendar_str = "ethioaa";
+ } else if (calendar_str == "islamic") {
+ if (date_time_format->alt_calendar()) {
+ calendar_str = "islamic-rgsa";
+ }
}
const icu::TimeZone& tz = calendar->getTimeZone();
@@ -771,7 +776,7 @@ Isolate::ICUObjectCacheType ConvertToCacheType(
MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
Handle<Object> options, RequiredOption required, DefaultsOption defaults,
- const char* method) {
+ const char* method_name) {
Isolate::ICUObjectCacheType cache_type = ConvertToCacheType(defaults);
Factory* factory = isolate->factory();
@@ -821,7 +826,8 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
Handle<JSDateTimeFormat> date_time_format;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, date_time_format,
- JSDateTimeFormat::New(isolate, map, locales, internal_options, method),
+ JSDateTimeFormat::New(isolate, map, locales, internal_options,
+ method_name),
String);
if (can_cache) {
@@ -1499,7 +1505,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
const std::vector<const char*> empty_values = {};
// 6. Let calendar be ? GetOption(options, "calendar",
// "string", undefined, undefined).
- Maybe<bool> maybe_calendar = Intl::GetStringOption(
+ Maybe<bool> maybe_calendar = GetStringOption(
isolate, options, "calendar", empty_values, service, &calendar_str);
MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>());
if (maybe_calendar.FromJust() && calendar_str != nullptr) {
@@ -1523,7 +1529,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// undefined).
bool hour12;
Maybe<bool> maybe_get_hour12 =
- Intl::GetBoolOption(isolate, options, "hour12", service, &hour12);
+ GetBoolOption(isolate, options, "hour12", service, &hour12);
MAYBE_RETURN(maybe_get_hour12, Handle<JSDateTimeFormat>());
// 7. Let hourCycle be ? GetOption(options, "hourCycle", "string", « "h11",
@@ -1588,7 +1594,9 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
icu_locale.setUnicodeKeywordValue("ca", calendar_str.get(), status);
DCHECK(U_SUCCESS(status));
}
- bool iso8601 = strstr(icu_locale.getName(), "calendar=iso8601") != nullptr;
+ bool alt_calendar =
+ strstr(icu_locale.getName(), "calendar=iso8601") != nullptr ||
+ strstr(icu_locale.getName(), "calendar=islamic-rgsa") != nullptr;
if (numbering_system_str != nullptr &&
Intl::IsValidNumberingSystem(numbering_system_str.get())) {
@@ -1651,7 +1659,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 17. Let timeZone be ? Get(options, "timeZone").
std::unique_ptr<char[]> timezone = nullptr;
- Maybe<bool> maybe_timezone = Intl::GetStringOption(
+ Maybe<bool> maybe_timezone = GetStringOption(
isolate, options, "timeZone", empty_values, service, &timezone);
MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
@@ -1689,7 +1697,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
if (item.property == "timeZoneName") {
// Let _value_ be ? GetNumberOption(options, "fractionalSecondDigits", 1,
// 3, *undefined*). The *undefined* is represented by value 0 here.
- Maybe<int> maybe_fsd = Intl::GetNumberOption(
+ Maybe<int> maybe_fsd = GetNumberOption(
isolate, options, factory->fractionalSecondDigits_string(), 1, 3, 0);
MAYBE_RETURN(maybe_fsd, MaybeHandle<JSDateTimeFormat>());
// Convert fractionalSecondDigits to skeleton.
@@ -1703,8 +1711,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// ii. Let value be ? GetOption(options, prop, "string", « the strings
// given in the Values column of the row », undefined).
Maybe<bool> maybe_get_option =
- Intl::GetStringOption(isolate, options, item.property.c_str(),
- item.allowed_values, service, &input);
+ GetStringOption(isolate, options, item.property.c_str(),
+ item.allowed_values, service, &input);
MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
if (maybe_get_option.FromJust()) {
if (item.property == "hour") {
@@ -1724,7 +1732,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// c. Let matcher be ? GetOption(options, "formatMatcher", "string",
// « "basic", "best fit" », "best fit").
Maybe<FormatMatcherOption> maybe_format_matcher =
- Intl::GetStringOption<FormatMatcherOption>(
+ GetStringOption<FormatMatcherOption>(
isolate, options, "formatMatcher", service, {"best fit", "basic"},
{FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
FormatMatcherOption::kBestFit);
@@ -1734,7 +1742,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 32. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
// "full", "long", "medium", "short" », undefined).
- Maybe<DateTimeStyle> maybe_date_style = Intl::GetStringOption<DateTimeStyle>(
+ Maybe<DateTimeStyle> maybe_date_style = GetStringOption<DateTimeStyle>(
isolate, options, "dateStyle", service,
{"full", "long", "medium", "short"},
{DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
@@ -1746,7 +1754,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 34. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
// "full", "long", "medium", "short" »).
- Maybe<DateTimeStyle> maybe_time_style = Intl::GetStringOption<DateTimeStyle>(
+ Maybe<DateTimeStyle> maybe_time_style = GetStringOption<DateTimeStyle>(
isolate, options, "timeStyle", service,
{"full", "long", "medium", "short"},
{DateTimeStyle::kFull, DateTimeStyle::kLong, DateTimeStyle::kMedium,
@@ -1896,7 +1904,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
date_time_format->set_time_style(time_style);
}
date_time_format->set_hour_cycle(dateTimeFormatHourCycle);
- date_time_format->set_iso8601(iso8601);
+ date_time_format->set_alt_calendar(alt_calendar);
date_time_format->set_locale(*locale_str);
date_time_format->set_icu_locale(*managed_locale);
date_time_format->set_icu_simple_date_format(*managed_format);
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index 335d80a2db..9c5b2f9dc8 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -16,7 +16,6 @@
#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
-#include "torque-generated/field-offsets.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -82,7 +81,7 @@ class JSDateTimeFormat
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime(
Isolate* isolate, Handle<Object> date, Handle<Object> locales,
Handle<Object> options, RequiredOption required, DefaultsOption defaults,
- const char* method);
+ const char* method_name);
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
@@ -128,7 +127,7 @@ class JSDateTimeFormat
DECL_ACCESSORS(icu_simple_date_format, Managed<icu::SimpleDateFormat>)
DECL_ACCESSORS(icu_date_interval_format, Managed<icu::DateIntervalFormat>)
- DECL_BOOLEAN_ACCESSORS(iso8601)
+ DECL_BOOLEAN_ACCESSORS(alt_calendar)
DECL_PRINTER(JSDateTimeFormat)
diff --git a/deps/v8/src/objects/js-date-time-format.tq b/deps/v8/src/objects/js-date-time-format.tq
index fedd761cdf..ef0584e790 100644
--- a/deps/v8/src/objects/js-date-time-format.tq
+++ b/deps/v8/src/objects/js-date-time-format.tq
@@ -10,7 +10,14 @@ bitfield struct JSDateTimeFormatFlags extends uint31 {
hour_cycle: HourCycle: 3 bit;
date_style: DateTimeStyle: 3 bit;
time_style: DateTimeStyle: 3 bit;
- iso8601: bool: 1bit;
+ // ICU reports the same type "gregorian" for both the "gregorian" and
+ // "iso8601" calendars, and the same type "islamic" for both the "islamic"
+ // and "islamic-rgsa" calendars. We use the alt_calendar bit to distinguish
+ // between them. When the type is "gregorian" and the alt_calendar bit is
+ // set, the calendar is "iso8601"; otherwise it is the true "gregorian"
+ // calendar. When the type is "islamic" and the alt_calendar bit is set,
+ // the calendar is "islamic-rgsa"; otherwise it is "islamic".
+ alt_calendar: bool: 1bit;
}
extern class JSDateTimeFormat extends JSObject {
diff --git a/deps/v8/src/objects/js-display-names.cc b/deps/v8/src/objects/js-display-names.cc
index d4f05ad739..d2e1064967 100644
--- a/deps/v8/src/objects/js-display-names.cc
+++ b/deps/v8/src/objects/js-display-names.cc
@@ -15,8 +15,9 @@
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-display-names-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
#include "unicode/localebuilder.h"
@@ -118,8 +119,11 @@ class LanguageNames : public LocaleDisplayNamesCommon {
LanguageNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
: LocaleDisplayNamesCommon(locale, style, fallback, dialect) {}
+
~LanguageNames() override = default;
+
const char* type() const override { return "language"; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
UErrorCode status = U_ZERO_ERROR;
@@ -152,8 +156,11 @@ class RegionNames : public LocaleDisplayNamesCommon {
RegionNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
: LocaleDisplayNamesCommon(locale, style, fallback, dialect) {}
+
~RegionNames() override = default;
+
const char* type() const override { return "region"; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -174,8 +181,11 @@ class ScriptNames : public LocaleDisplayNamesCommon {
ScriptNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
: LocaleDisplayNamesCommon(locale, style, fallback, dialect) {}
+
~ScriptNames() override = default;
+
const char* type() const override { return "script"; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -194,30 +204,47 @@ class ScriptNames : public LocaleDisplayNamesCommon {
class KeyValueDisplayNames : public LocaleDisplayNamesCommon {
public:
KeyValueDisplayNames(const icu::Locale& locale, JSDisplayNames::Style style,
- bool fallback, bool dialect, const char* key)
- : LocaleDisplayNamesCommon(locale, style, fallback, dialect), key_(key) {}
+ bool fallback, bool dialect, const char* key,
+ bool prevent_fallback)
+ : LocaleDisplayNamesCommon(locale, style, fallback, dialect),
+ key_(key),
+ prevent_fallback_(prevent_fallback) {}
+
~KeyValueDisplayNames() override = default;
+
const char* type() const override { return key_.c_str(); }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
icu::UnicodeString result;
locale_display_names()->keyValueDisplayName(key_.c_str(), code_str.c_str(),
result);
+ // Work around the issue that keyValueDisplayName ignores the
+ // no-substitution setting and always falls back.
+ if (prevent_fallback_ && (result.length() == 3) &&
+ (code_str.length() == 3) &&
+ (result == icu::UnicodeString(code_str.c_str(), -1, US_INV))) {
+ result.setToBogus();
+ }
return Just(result);
}
private:
std::string key_;
+ bool prevent_fallback_;
};
class CurrencyNames : public KeyValueDisplayNames {
public:
CurrencyNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
- : KeyValueDisplayNames(locale, style, fallback, dialect, "currency") {}
+ : KeyValueDisplayNames(locale, style, fallback, dialect, "currency",
+ fallback == false) {}
+
~CurrencyNames() override = default;
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -234,8 +261,11 @@ class CalendarNames : public KeyValueDisplayNames {
public:
CalendarNames(const icu::Locale& locale, JSDisplayNames::Style style,
bool fallback, bool dialect)
- : KeyValueDisplayNames(locale, style, fallback, dialect, "calendar") {}
+ : KeyValueDisplayNames(locale, style, fallback, dialect, "calendar",
+ false) {}
+
~CalendarNames() override = default;
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
std::string code_str(code);
@@ -312,9 +342,13 @@ class DateTimeFieldNames : public DisplayNamesInternal {
icu::DateTimePatternGenerator::createInstance(locale_, status));
DCHECK(U_SUCCESS(status));
}
+
~DateTimeFieldNames() override = default;
+
const char* type() const override { return "dateTimeField"; }
+
icu::Locale locale() const override { return locale_; }
+
Maybe<icu::UnicodeString> of(Isolate* isolate,
const char* code) const override {
UDateTimePatternField field = StringToUDateTimePatternField(code);
@@ -372,9 +406,9 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
maybe_requested_locales.FromJust();
// 4. Let options be ? GetOptionsObject(options).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
- JSDisplayNames);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ GetOptionsObject(isolate, input_options, service),
+ JSDisplayNames);
// Note: No need to create a record. It's not observable.
// 5. Let opt be a new Record.
@@ -409,7 +443,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 10. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service, {"long", "short", "narrow"},
{Style::kLong, Style::kShort, Style::kNarrow}, Style::kLong);
MAYBE_RETURN(maybe_style, MaybeHandle<JSDisplayNames>());
@@ -422,23 +456,22 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// undefined).
Maybe<Type> maybe_type =
FLAG_harmony_intl_displaynames_v2
- ? Intl::GetStringOption<Type>(
+ ? GetStringOption<Type>(
isolate, options, "type", service,
{"language", "region", "script", "currency", "calendar",
"dateTimeField"},
{Type::kLanguage, Type::kRegion, Type::kScript, Type::kCurrency,
Type::kCalendar, Type::kDateTimeField},
Type::kUndefined)
- : Intl::GetStringOption<Type>(
- isolate, options, "type", service,
- {"language", "region", "script", "currency"},
- {
- Type::kLanguage,
- Type::kRegion,
- Type::kScript,
- Type::kCurrency,
- },
- Type::kUndefined);
+ : GetStringOption<Type>(isolate, options, "type", service,
+ {"language", "region", "script", "currency"},
+ {
+ Type::kLanguage,
+ Type::kRegion,
+ Type::kScript,
+ Type::kCurrency,
+ },
+ Type::kUndefined);
MAYBE_RETURN(maybe_type, MaybeHandle<JSDisplayNames>());
Type type_enum = maybe_type.FromJust();
@@ -452,7 +485,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 15. Let fallback be ? GetOption(options, "fallback", "string",
// « "code", "none" », "code").
- Maybe<Fallback> maybe_fallback = Intl::GetStringOption<Fallback>(
+ Maybe<Fallback> maybe_fallback = GetStringOption<Fallback>(
isolate, options, "fallback", service, {"code", "none"},
{Fallback::kCode, Fallback::kNone}, Fallback::kCode);
MAYBE_RETURN(maybe_fallback, MaybeHandle<JSDisplayNames>());
@@ -465,7 +498,7 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 24. Let languageDisplay be ? GetOption(options, "languageDisplay",
// "string", « "dialect", "standard" », "dialect").
Maybe<LanguageDisplay> maybe_language_display =
- Intl::GetStringOption<LanguageDisplay>(
+ GetStringOption<LanguageDisplay>(
isolate, options, "languageDisplay", service,
{"dialect", "standard"},
{LanguageDisplay::kDialect, LanguageDisplay::kStandard},
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index 3bcaf07387..105f6388af 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -549,6 +549,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_DATE_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index b7df4daf8b..85a1236e41 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -7,7 +7,6 @@
#include "src/objects/code-kind.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
index 8932ea4395..59dd2d5dc2 100644
--- a/deps/v8/src/objects/js-function.tq
+++ b/deps/v8/src/objects/js-function.tq
@@ -17,6 +17,8 @@ extern class JSBoundFunction extends JSFunctionOrBoundFunction {
bound_arguments: FixedArray;
}
+// This class does not use the generated verifier, so if you change anything
+// here, please also update JSFunctionVerify in objects-debug.cc.
@highestInstanceTypeWithinParentClassRange
extern class JSFunction extends JSFunctionOrBoundFunction {
shared_function_info: SharedFunctionInfo;
@@ -25,7 +27,15 @@ extern class JSFunction extends JSFunctionOrBoundFunction {
@if(V8_EXTERNAL_CODE_SPACE) code: CodeDataContainer;
@ifnot(V8_EXTERNAL_CODE_SPACE) code: Code;
// Space for the following field may or may not be allocated.
- @noVerifier prototype_or_initial_map: JSReceiver|Map;
+ prototype_or_initial_map: JSReceiver|Map;
}
+// Class constructors are special, because they are callable, but [[Call]] will
+// raise an exception.
+// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+@doNotGenerateCast
+@highestInstanceTypeWithinParentClassRange
+extern class JSClassConstructor extends JSFunction
+ generates 'TNode<JSFunction>';
+
type JSFunctionWithPrototypeSlot extends JSFunction;
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index ae9e7302bf..6830d4f992 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -18,8 +18,9 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-list-format-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/fieldpos.h"
#include "unicode/fpositer.h"
#include "unicode/listformatter.h"
@@ -69,9 +70,9 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
Handle<JSReceiver> options;
const char* service = "Intl.ListFormat";
// 4. Let options be GetOptionsObject(_options_).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
- JSListFormat);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ GetOptionsObject(isolate, input_options, service),
+ JSListFormat);
// Note: No need to create a record. It's not observable.
// 6. Let opt be a new Record.
@@ -100,7 +101,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 12. Let t be GetOption(options, "type", "string", «"conjunction",
// "disjunction", "unit"», "conjunction").
- Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ Maybe<Type> maybe_type = GetStringOption<Type>(
isolate, options, "type", service, {"conjunction", "disjunction", "unit"},
{Type::CONJUNCTION, Type::DISJUNCTION, Type::UNIT}, Type::CONJUNCTION);
MAYBE_RETURN(maybe_type, MaybeHandle<JSListFormat>());
@@ -108,7 +109,7 @@ MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
// 14. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service, {"long", "short", "narrow"},
{Style::LONG, Style::SHORT, Style::NARROW}, Style::LONG);
MAYBE_RETURN(maybe_style, MaybeHandle<JSListFormat>());
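The Intl.ListFormat hunks above only switch from the Intl::-scoped option helpers to the standalone ones in option-utils.h; the "type" and "style" options they read are unchanged. A minimal JavaScript sketch of what those options select (illustration only):

    new Intl.ListFormat('en', { type: 'conjunction', style: 'long' }).format(['a', 'b', 'c']);
    // "a, b, and c"
    new Intl.ListFormat('en', { type: 'disjunction' }).format(['a', 'b', 'c']);
    // "a, b, or c"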
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 51cf1453f4..05f4a7302d 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -15,11 +15,12 @@
#include "src/api/api.h"
#include "src/execution/isolate.h"
-#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/calendar.h"
#include "unicode/char16ptr.h"
#include "unicode/coll.h"
@@ -70,11 +71,11 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
bool value_bool = false;
Maybe<bool> maybe_found =
option_to_bcp47.is_bool_value
- ? Intl::GetBoolOption(isolate, options, option_to_bcp47.name,
- "locale", &value_bool)
- : Intl::GetStringOption(isolate, options, option_to_bcp47.name,
- *(option_to_bcp47.possible_values),
- "locale", &value_str);
+ ? GetBoolOption(isolate, options, option_to_bcp47.name, "locale",
+ &value_bool)
+ : GetStringOption(isolate, options, option_to_bcp47.name,
+ *(option_to_bcp47.possible_values), "locale",
+ &value_str);
MAYBE_RETURN(maybe_found, Nothing<bool>());
// TODO(cira): Use fallback value if value is not found to make
@@ -183,19 +184,11 @@ bool JSLocale::Is38AlphaNumList(const std::string& in) {
std::string value = in;
while (true) {
std::size_t found_dash = value.find("-");
- std::size_t found_underscore = value.find("_");
- if (found_dash == std::string::npos &&
- found_underscore == std::string::npos) {
+ if (found_dash == std::string::npos) {
return IsAlphanum(value, 3, 8);
}
- if (found_underscore == std::string::npos ||
- found_dash < found_underscore) {
- if (!IsAlphanum(value.substr(0, found_dash), 3, 8)) return false;
- value = value.substr(found_dash + 1);
- } else {
- if (!IsAlphanum(value.substr(0, found_underscore), 3, 8)) return false;
- value = value.substr(found_underscore + 1);
- }
+ if (!IsAlphanum(value.substr(0, found_dash), 3, 8)) return false;
+ value = value.substr(found_dash + 1);
}
}
@@ -274,8 +267,8 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
const std::vector<const char*> empty_values = {};
std::unique_ptr<char[]> language_str = nullptr;
Maybe<bool> maybe_language =
- Intl::GetStringOption(isolate, options, "language", empty_values,
- "ApplyOptionsToTag", &language_str);
+ GetStringOption(isolate, options, "language", empty_values,
+ "ApplyOptionsToTag", &language_str);
MAYBE_RETURN(maybe_language, Nothing<bool>());
// 4. If language is not undefined, then
if (maybe_language.FromJust()) {
@@ -292,8 +285,8 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
// undefined).
std::unique_ptr<char[]> script_str = nullptr;
Maybe<bool> maybe_script =
- Intl::GetStringOption(isolate, options, "script", empty_values,
- "ApplyOptionsToTag", &script_str);
+ GetStringOption(isolate, options, "script", empty_values,
+ "ApplyOptionsToTag", &script_str);
MAYBE_RETURN(maybe_script, Nothing<bool>());
// 6. If script is not undefined, then
if (maybe_script.FromJust()) {
@@ -309,8 +302,8 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
// undefined).
std::unique_ptr<char[]> region_str = nullptr;
Maybe<bool> maybe_region =
- Intl::GetStringOption(isolate, options, "region", empty_values,
- "ApplyOptionsToTag", &region_str);
+ GetStringOption(isolate, options, "region", empty_values,
+ "ApplyOptionsToTag", &region_str);
MAYBE_RETURN(maybe_region, Nothing<bool>());
// 8. If region is not undefined, then
if (maybe_region.FromJust()) {
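ApplyOptionsToTag reads the "language", "script", and "region" options via GetStringOption and merges them into the base tag. A minimal JavaScript sketch of the behavior these helpers implement (illustration only):

    new Intl.Locale('en-US', { region: 'GB' }).baseName;    // "en-GB"
    new Intl.Locale('en-US', { language: 'fr' }).baseName;  // "fr-US"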
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index cf093f7fa5..cc337a0df2 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -14,7 +14,9 @@
#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/currunit.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
@@ -816,8 +818,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 2. Set options to ? CoerceOptionsToObject(options).
Handle<JSReceiver> options;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, options_obj, service),
+ isolate, options, CoerceOptionsToObject(isolate, options_obj, service),
JSNumberFormat);
// 4. Let opt be a new Record.
@@ -899,7 +900,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 3. Let style be ? GetOption(options, "style", "string", « "decimal",
// "percent", "currency", "unit" », "decimal").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service,
{"decimal", "percent", "currency", "unit"},
{Style::DECIMAL, Style::PERCENT, Style::CURRENCY, Style::UNIT},
@@ -913,7 +914,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// undefined).
std::unique_ptr<char[]> currency_cstr;
const std::vector<const char*> empty_values = {};
- Maybe<bool> found_currency = Intl::GetStringOption(
+ Maybe<bool> found_currency = GetStringOption(
isolate, options, "currency", empty_values, service, &currency_cstr);
MAYBE_RETURN(found_currency, MaybeHandle<JSNumberFormat>());
@@ -943,7 +944,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 8. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
// "string", « "code", "symbol", "name", "narrowSymbol" », "symbol").
Maybe<CurrencyDisplay> maybe_currency_display =
- Intl::GetStringOption<CurrencyDisplay>(
+ GetStringOption<CurrencyDisplay>(
isolate, options, "currencyDisplay", service,
{"code", "symbol", "name", "narrowSymbol"},
{CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL,
@@ -955,7 +956,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
CurrencySign currency_sign = CurrencySign::STANDARD;
// 9. Let currencySign be ? GetOption(options, "currencySign", "string", «
// "standard", "accounting" », "standard").
- Maybe<CurrencySign> maybe_currency_sign = Intl::GetStringOption<CurrencySign>(
+ Maybe<CurrencySign> maybe_currency_sign = GetStringOption<CurrencySign>(
isolate, options, "currencySign", service, {"standard", "accounting"},
{CurrencySign::STANDARD, CurrencySign::ACCOUNTING},
CurrencySign::STANDARD);
@@ -965,8 +966,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 10. Let unit be ? GetOption(options, "unit", "string", undefined,
// undefined).
std::unique_ptr<char[]> unit_cstr;
- Maybe<bool> found_unit = Intl::GetStringOption(
- isolate, options, "unit", empty_values, service, &unit_cstr);
+ Maybe<bool> found_unit = GetStringOption(isolate, options, "unit",
+ empty_values, service, &unit_cstr);
MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>());
std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair;
@@ -1001,7 +1002,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 13. Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
// "short", "narrow", "long" », "short").
- Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>(
+ Maybe<UnitDisplay> maybe_unit_display = GetStringOption<UnitDisplay>(
isolate, options, "unitDisplay", service, {"short", "narrow", "long"},
{UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG},
UnitDisplay::SHORT);
@@ -1097,7 +1098,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
Notation notation = Notation::STANDARD;
// 25. Let notation be ? GetOption(options, "notation", "string", «
// "standard", "scientific", "engineering", "compact" », "standard").
- Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
+ Maybe<Notation> maybe_notation = GetStringOption<Notation>(
isolate, options, "notation", service,
{"standard", "scientific", "engineering", "compact"},
{Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING,
@@ -1119,10 +1120,9 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 28. Let compactDisplay be ? GetOption(options, "compactDisplay",
// "string", « "short", "long" », "short").
- Maybe<CompactDisplay> maybe_compact_display =
- Intl::GetStringOption<CompactDisplay>(
- isolate, options, "compactDisplay", service, {"short", "long"},
- {CompactDisplay::SHORT, CompactDisplay::LONG}, CompactDisplay::SHORT);
+ Maybe<CompactDisplay> maybe_compact_display = GetStringOption<CompactDisplay>(
+ isolate, options, "compactDisplay", service, {"short", "long"},
+ {CompactDisplay::SHORT, CompactDisplay::LONG}, CompactDisplay::SHORT);
MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
CompactDisplay compact_display = maybe_compact_display.FromJust();
@@ -1136,8 +1136,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
// undefined, true).
bool use_grouping = true;
- Maybe<bool> found_use_grouping = Intl::GetBoolOption(
- isolate, options, "useGrouping", service, &use_grouping);
+ Maybe<bool> found_use_grouping =
+ GetBoolOption(isolate, options, "useGrouping", service, &use_grouping);
MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
// 31. Set numberFormat.[[UseGrouping]] to useGrouping.
if (!use_grouping) {
@@ -1147,7 +1147,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
// 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", «
// "auto", "never", "always", "exceptZero" », "auto").
- Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
+ Maybe<SignDisplay> maybe_sign_display = GetStringOption<SignDisplay>(
isolate, options, "signDisplay", service,
{"auto", "never", "always", "exceptZero"},
{SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
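The Intl.NumberFormat hunks above likewise move CoerceOptionsToObject, GetStringOption, and GetBoolOption to option-utils.h without changing which options are read. A minimal JavaScript sketch of the options handled here (illustration only):

    new Intl.NumberFormat('en', { style: 'currency', currency: 'EUR', currencySign: 'accounting' }).format(-1);
    // "(€1.00)"
    new Intl.NumberFormat('en', { notation: 'compact', compactDisplay: 'short' }).format(12345);
    // "12K"
    new Intl.NumberFormat('en', { style: 'unit', unit: 'kilometer-per-hour' }).format(50);
    // "50 km/h"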
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index c35999592a..dbe3f7f401 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -397,7 +397,7 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value) {
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(kData, details.kind());
DisallowGarbageCollection no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index cdd16a65a6..008036b11f 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -260,7 +260,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
PropertyDetails details = descriptors->GetDetails(i);
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
prop_value = handle(descriptors->GetStrongValue(i), isolate);
} else {
Representation representation = details.representation();
@@ -1055,6 +1055,11 @@ Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
return JSTypedArray::DefineOwnProperty(
isolate, Handle<JSTypedArray>::cast(object), key, desc, should_throw);
}
+ if (object->IsJSModuleNamespace()) {
+ return JSModuleNamespace::DefineOwnProperty(
+ isolate, Handle<JSModuleNamespace>::cast(object), key, desc,
+ should_throw);
+ }
// OrdinaryDefineOwnProperty, by virtue of calling
// DefineOwnPropertyIgnoreAttributes, can handle arguments
@@ -1996,7 +2001,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
PropertyDetails details = descriptors->GetDetails(index);
if (!details.IsEnumerable()) continue;
if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
prop_value = handle(descriptors->GetStrongValue(index), isolate);
} else {
Representation representation = details.representation();
@@ -2238,6 +2243,7 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_BOUND_FUNCTION_TYPE:
return JSBoundFunction::kHeaderSize;
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -2584,6 +2590,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
case TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE:
TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTORS_SWITCH)
#undef TYPED_ARRAY_CONSTRUCTORS_SWITCH
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_FUNCTION_TYPE: {
JSFunction function = JSFunction::cast(*this);
std::unique_ptr<char[]> fun_name = function.shared().DebugNameCStr();
@@ -2710,8 +2717,8 @@ void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
if (!o_r.Equals(n_r)) {
String::cast(o.GetKey(i)).PrintOn(file);
PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
- } else if (o.GetDetails(i).location() == kDescriptor &&
- n.GetDetails(i).location() == kField) {
+ } else if (o.GetDetails(i).location() == PropertyLocation::kDescriptor &&
+ n.GetDetails(i).location() == PropertyLocation::kField) {
Name name = o.GetKey(i);
if (name.IsString()) {
String::cast(name).PrintOn(file);
@@ -2817,7 +2824,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// If the map adds a new kDescriptor property, simply set the map.
PropertyDetails details = new_map->GetLastDescriptorDetails(isolate);
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
object->set_map(*new_map, kReleaseStore);
return;
}
@@ -2852,7 +2859,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
} else {
value = isolate->factory()->uninitialized_value();
}
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(kData, details.kind());
DCHECK(!index.is_inobject()); // Must be a backing store index.
new_storage->set(index.outobject_array_index(), *value);
@@ -2902,13 +2909,13 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
for (InternalIndex i : InternalIndex::Range(old_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.location() != kField) continue;
+ if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, details.kind());
PropertyDetails old_details = old_descriptors->GetDetails(i);
Representation old_representation = old_details.representation();
Representation representation = details.representation();
Handle<Object> value;
- if (old_details.location() == kDescriptor) {
+ if (old_details.location() == PropertyLocation::kDescriptor) {
if (old_details.kind() == kAccessor) {
// In case of kAccessor -> kData property reconfiguration, the property
// must already be prepared for data of certain type.
@@ -2924,7 +2931,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
}
} else {
- DCHECK_EQ(kField, old_details.location());
+ DCHECK_EQ(PropertyLocation::kField, old_details.location());
FieldIndex index = FieldIndex::ForDescriptor(isolate, *old_map, i);
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
@@ -2946,7 +2953,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
for (InternalIndex i : InternalIndex::Range(old_nof, new_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.location() != kField) continue;
+ if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, details.kind());
Handle<Object> value;
if (details.representation().IsDouble()) {
@@ -3035,7 +3042,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(isolate, i), isolate);
Handle<Object> value;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
FieldIndex index = FieldIndex::ForDescriptor(isolate, *map, i);
if (details.kind() == kData) {
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
@@ -3050,7 +3057,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
value = handle(descs->GetStrongValue(isolate, i), isolate);
}
DCHECK(!value.is_null());
@@ -3592,7 +3599,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
new_map->set_may_have_interesting_symbols(true);
}
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_IMPLIES(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
details.constness() == PropertyConstness::kMutable);
@@ -3617,7 +3624,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
details.attributes());
}
details = d.GetDetails();
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (current_offset < inobject_props) {
object->InObjectPropertyAtPut(current_offset, value,
UPDATE_WRITE_BARRIER);
@@ -4417,7 +4424,7 @@ Object JSObject::SlowReverseLookup(Object value) {
bool value_is_number = value.IsNumber();
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
Object property = RawFastPropertyAt(field_index);
@@ -4430,7 +4437,7 @@ Object JSObject::SlowReverseLookup(Object value) {
return descs.GetKey(i);
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
if (details.kind() == kData) {
if (descs.GetStrongValue(i) == value) {
return descs.GetKey(i);
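The new JSModuleNamespace dispatch in JSReceiver::DefineOwnProperty routes defineProperty calls on module namespace objects to the spec's dedicated [[DefineOwnProperty]], which rejects new or incompatible properties. A minimal JavaScript sketch (the module path is hypothetical, illustration only):

    const ns = await import('./mod.mjs');  // namespace object of some module
    Object.defineProperty(ns, 'notExported', { value: 1 });
    // TypeError: module namespace objects reject definitions of new properties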
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index d20cdaceb4..c0d3405f26 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -10,7 +10,6 @@
#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "src/objects/property-array.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index 1ce7dbd9ea..927bca18de 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -55,7 +55,7 @@ macro GetDerivedMap(implicit context: Context)(
try {
const constructor =
Cast<JSFunctionWithPrototypeSlot>(newTarget) otherwise SlowPath;
- assert(IsConstructor(constructor));
+ dcheck(IsConstructor(constructor));
const map =
Cast<Map>(constructor.prototype_or_initial_map) otherwise SlowPath;
if (LoadConstructorOrBackPointer(map) != target) {
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index 9c2d77d6bc..ec15bd17cd 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -12,6 +12,8 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format.h"
#include "src/objects/js-plural-rules-inl.h"
+#include "src/objects/managed-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/locid.h"
#include "unicode/numberformatter.h"
#include "unicode/plurrule.h"
@@ -74,8 +76,7 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
Handle<JSReceiver> options;
const char* service = "Intl.PluralRules";
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, options_obj, service),
+ isolate, options, CoerceOptionsToObject(isolate, options_obj, service),
JSPluralRules);
// 5. Let matcher be ? GetOption(options, "localeMatcher", "string",
@@ -88,7 +89,7 @@ MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
// 7. Let t be ? GetOption(options, "type", "string", « "cardinal",
// "ordinal" », "cardinal").
- Maybe<Type> maybe_type = Intl::GetStringOption<Type>(
+ Maybe<Type> maybe_type = GetStringOption<Type>(
isolate, options, "type", service, {"cardinal", "ordinal"},
{Type::CARDINAL, Type::ORDINAL}, Type::CARDINAL);
MAYBE_RETURN(maybe_type, MaybeHandle<JSPluralRules>());
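As above, Intl.PluralRules now uses the standalone option helpers; the "type" option still chooses between cardinal and ordinal rules. A minimal JavaScript sketch (illustration only):

    new Intl.PluralRules('en', { type: 'cardinal' }).select(2);  // "other" (2 items)
    new Intl.PluralRules('en', { type: 'ordinal' }).select(2);   // "two"   (2nd)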
diff --git a/deps/v8/src/objects/js-promise.tq b/deps/v8/src/objects/js-promise.tq
index be8fb06637..01426fd6d2 100644
--- a/deps/v8/src/objects/js-promise.tq
+++ b/deps/v8/src/objects/js-promise.tq
@@ -16,8 +16,8 @@ extern class JSPromise extends JSObject {
}
macro SetStatus(status: constexpr PromiseState): void {
- assert(this.Status() == PromiseState::kPending);
- assert(status != PromiseState::kPending);
+ dcheck(this.Status() == PromiseState::kPending);
+ dcheck(status != PromiseState::kPending);
this.flags.status = status;
}
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index c865b1ffd5..df89b4d17a 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -98,7 +98,6 @@ class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
LookupIterator* it);
// Dispatched behavior.
- DECL_PRINTER(JSProxy)
DECL_VERIFIER(JSProxy)
static const int kMaxIterationLimit = 100 * 1024;
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 2a69bea650..f4e38056f9 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -27,15 +27,15 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultWithIndices)
ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
-JSRegExp::Type JSRegExp::TypeTag() const {
+JSRegExp::Type JSRegExp::type_tag() const {
Object data = this->data();
if (data.IsUndefined()) return JSRegExp::NOT_COMPILED;
Smi smi = Smi::cast(FixedArray::cast(data).get(kTagIndex));
return static_cast<JSRegExp::Type>(smi.value());
}
-int JSRegExp::CaptureCount() const {
- switch (TypeTag()) {
+int JSRegExp::capture_count() const {
+ switch (type_tag()) {
case ATOM:
return 0;
case EXPERIMENTAL:
@@ -46,61 +46,59 @@ int JSRegExp::CaptureCount() const {
}
}
-int JSRegExp::MaxRegisterCount() const {
- CHECK_EQ(TypeTag(), IRREGEXP);
+int JSRegExp::max_register_count() const {
+ CHECK_EQ(type_tag(), IRREGEXP);
return Smi::ToInt(DataAt(kIrregexpMaxRegisterCountIndex));
}
-JSRegExp::Flags JSRegExp::GetFlags() const {
- DCHECK(this->data().IsFixedArray());
- Object data = this->data();
- Smi smi = Smi::cast(FixedArray::cast(data).get(kFlagsIndex));
- return Flags(smi.value());
+String JSRegExp::atom_pattern() const {
+ DCHECK_EQ(type_tag(), ATOM);
+ return String::cast(DataAt(JSRegExp::kAtomPatternIndex));
}
-String JSRegExp::Pattern() {
- DCHECK(this->data().IsFixedArray());
- Object data = this->data();
- String pattern = String::cast(FixedArray::cast(data).get(kSourceIndex));
- return pattern;
+String JSRegExp::source() const {
+ return String::cast(TorqueGeneratedClass::source());
+}
+
+JSRegExp::Flags JSRegExp::flags() const {
+ Smi smi = Smi::cast(TorqueGeneratedClass::flags());
+ return Flags(smi.value());
}
String JSRegExp::EscapedPattern() {
DCHECK(this->source().IsString());
- String pattern = String::cast(source());
- return pattern;
+ return String::cast(source());
}
-Object JSRegExp::CaptureNameMap() {
- DCHECK(this->data().IsFixedArray());
- DCHECK(TypeSupportsCaptures(TypeTag()));
+Object JSRegExp::capture_name_map() {
+ DCHECK(TypeSupportsCaptures(type_tag()));
Object value = DataAt(kIrregexpCaptureNameMapIndex);
DCHECK_NE(value, Smi::FromInt(JSRegExp::kUninitializedValue));
return value;
}
+void JSRegExp::set_capture_name_map(Handle<FixedArray> capture_name_map) {
+ if (capture_name_map.is_null()) {
+ SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::zero());
+ } else {
+ SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, *capture_name_map);
+ }
+}
+
Object JSRegExp::DataAt(int index) const {
- DCHECK(TypeTag() != NOT_COMPILED);
+ DCHECK(type_tag() != NOT_COMPILED);
return FixedArray::cast(data()).get(index);
}
void JSRegExp::SetDataAt(int index, Object value) {
- DCHECK(TypeTag() != NOT_COMPILED);
- DCHECK_GE(index,
- kDataIndex); // Only implementation data can be set this way.
+ DCHECK(type_tag() != NOT_COMPILED);
+ // Only implementation data can be set this way.
+ DCHECK_GE(index, kFirstTypeSpecificIndex);
FixedArray::cast(data()).set(index, value);
}
-void JSRegExp::SetCaptureNameMap(Handle<FixedArray> capture_name_map) {
- if (capture_name_map.is_null()) {
- SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::zero());
- } else {
- SetDataAt(JSRegExp::kIrregexpCaptureNameMapIndex, *capture_name_map);
- }
-}
-
bool JSRegExp::HasCompiledCode() const {
- if (TypeTag() != IRREGEXP) return false;
+ if (type_tag() != IRREGEXP) return false;
Smi uninitialized = Smi::FromInt(kUninitializedValue);
#ifdef DEBUG
DCHECK(DataAt(kIrregexpLatin1CodeIndex).IsCodeT() ||
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index e1e06cb12a..ce9a9a908c 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -6,6 +6,7 @@
#include "src/base/strings.h"
#include "src/common/globals.h"
+#include "src/objects/code.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/regexp/regexp.h"
@@ -105,8 +106,8 @@ Handle<JSRegExpResultIndices> JSRegExpResultIndices::BuildIndices(
return indices;
}
-uint32_t JSRegExp::BacktrackLimit() const {
- CHECK_EQ(TypeTag(), IRREGEXP);
+uint32_t JSRegExp::backtrack_limit() const {
+ CHECK_EQ(type_tag(), IRREGEXP);
return static_cast<uint32_t>(Smi::ToInt(DataAt(kIrregexpBacktrackLimit)));
}
@@ -156,18 +157,33 @@ MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
return JSRegExp::Initialize(regexp, pattern, flags, backtrack_limit);
}
-Object JSRegExp::Code(bool is_latin1) const {
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+Object JSRegExp::code(bool is_latin1) const {
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
Object value = DataAt(code_index(is_latin1));
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, value.IsSmi() || value.IsCodeT());
return value;
}
-Object JSRegExp::Bytecode(bool is_latin1) const {
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+void JSRegExp::set_code(bool is_latin1, Handle<Code> code) {
+ SetDataAt(code_index(is_latin1), ToCodeT(*code));
+}
+
+Object JSRegExp::bytecode(bool is_latin1) const {
+ DCHECK(type_tag() == JSRegExp::IRREGEXP ||
+ type_tag() == JSRegExp::EXPERIMENTAL);
return DataAt(bytecode_index(is_latin1));
}
+void JSRegExp::set_bytecode_and_trampoline(Isolate* isolate,
+ Handle<ByteArray> bytecode) {
+ SetDataAt(kIrregexpLatin1BytecodeIndex, *bytecode);
+ SetDataAt(kIrregexpUC16BytecodeIndex, *bytecode);
+
+ Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
+ SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, ToCodeT(*trampoline));
+ SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, ToCodeT(*trampoline));
+}
+
bool JSRegExp::ShouldProduceBytecode() {
return FLAG_regexp_interpret_all ||
(FLAG_regexp_tier_up && !MarkedForTierUp());
@@ -175,7 +191,7 @@ bool JSRegExp::ShouldProduceBytecode() {
// Only irregexps are subject to tier-up.
bool JSRegExp::CanTierUp() {
- return FLAG_regexp_tier_up && TypeTag() == JSRegExp::IRREGEXP;
+ return FLAG_regexp_tier_up && type_tag() == JSRegExp::IRREGEXP;
}
// An irregexp is considered to be marked for tier up if the tier-up ticks
@@ -192,7 +208,7 @@ bool JSRegExp::MarkedForTierUp() {
void JSRegExp::ResetLastTierUpTick() {
DCHECK(FLAG_regexp_tier_up);
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex)) + 1;
FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex,
Smi::FromInt(tier_up_ticks));
@@ -200,7 +216,7 @@ void JSRegExp::ResetLastTierUpTick() {
void JSRegExp::TierUpTick() {
DCHECK(FLAG_regexp_tier_up);
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
int tier_up_ticks = Smi::ToInt(DataAt(kIrregexpTicksUntilTierUpIndex));
if (tier_up_ticks == 0) {
return;
@@ -211,7 +227,7 @@ void JSRegExp::TierUpTick() {
void JSRegExp::MarkTierUpForNextExec() {
DCHECK(FLAG_regexp_tier_up);
- DCHECK_EQ(TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(type_tag(), JSRegExp::IRREGEXP);
FixedArray::cast(data()).set(JSRegExp::kIrregexpTicksUntilTierUpIndex,
Smi::zero());
}
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 4671f6607b..36e6b791cd 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -37,14 +37,51 @@ namespace internal {
// - number of capture registers (output values) of the regexp.
class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
public:
- // Meaning of Type:
- // NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
- // ATOM: A simple string to match against using an indexOf operation.
- // IRREGEXP: Compiled with Irregexp.
- // EXPERIMENTAL: Compiled to use the new linear time engine.
- enum Type { NOT_COMPILED, ATOM, IRREGEXP, EXPERIMENTAL };
+ enum Type {
+ NOT_COMPILED, // Initial value. No data array has been set yet.
+ ATOM, // A simple string match.
+ IRREGEXP, // Compiled with Irregexp (code or bytecode).
+ EXPERIMENTAL, // Compiled to use the experimental linear time engine.
+ };
DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
+ V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(
+ Isolate* isolate, Handle<String> source, Flags flags,
+ uint32_t backtrack_limit = kNoBacktrackLimit);
+
+ static MaybeHandle<JSRegExp> Initialize(
+ Handle<JSRegExp> regexp, Handle<String> source, Flags flags,
+ uint32_t backtrack_limit = kNoBacktrackLimit);
+ static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source,
+ Handle<String> flags_string);
+
+ DECL_ACCESSORS(last_index, Object)
+
+ // Instance fields accessors.
+ inline String source() const;
+ inline Flags flags() const;
+
+ // Data array field accessors.
+
+ inline Type type_tag() const;
+ inline String atom_pattern() const;
+ // This could be a Smi kUninitializedValue or Code.
+ V8_EXPORT_PRIVATE Object code(bool is_latin1) const;
+ V8_EXPORT_PRIVATE void set_code(bool is_latin1, Handle<Code> code);
+ // This could be a Smi kUninitializedValue or ByteArray.
+ V8_EXPORT_PRIVATE Object bytecode(bool is_latin1) const;
+ // Sets the bytecode and initializes the trampoline slots to the
+ // RegExpInterpreterTrampoline.
+ void set_bytecode_and_trampoline(Isolate* isolate,
+ Handle<ByteArray> bytecode);
+ inline int max_register_count() const;
+ // Number of captures (without the match itself).
+ inline int capture_count() const;
+ inline Object capture_name_map();
+ inline void set_capture_name_map(Handle<FixedArray> capture_name_map);
+ uint32_t backtrack_limit() const;
+
static constexpr Flag AsJSRegExpFlag(RegExpFlag f) {
return static_cast<Flag>(f);
}
@@ -75,56 +112,32 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
STATIC_ASSERT(kFlagCount == kRegExpFlagCount);
- DECL_ACCESSORS(last_index, Object)
-
- // If the backtrack limit is set to this marker value, no limit is applied.
- static constexpr uint32_t kNoBacktrackLimit = 0;
-
- V8_EXPORT_PRIVATE static MaybeHandle<JSRegExp> New(
- Isolate* isolate, Handle<String> source, Flags flags,
- uint32_t backtrack_limit = kNoBacktrackLimit);
-
- static MaybeHandle<JSRegExp> Initialize(
- Handle<JSRegExp> regexp, Handle<String> source, Flags flags,
- uint32_t backtrack_limit = kNoBacktrackLimit);
- static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
- Handle<String> source,
- Handle<String> flags_string);
-
static base::Optional<Flags> FlagsFromString(Isolate* isolate,
Handle<String> flags);
V8_EXPORT_PRIVATE static Handle<String> StringFromFlags(Isolate* isolate,
Flags flags);
+ inline String EscapedPattern();
+
bool CanTierUp();
bool MarkedForTierUp();
void ResetLastTierUpTick();
void TierUpTick();
void MarkTierUpForNextExec();
- inline Type TypeTag() const;
- static bool TypeSupportsCaptures(Type t) {
+ bool ShouldProduceBytecode();
+ inline bool HasCompiledCode() const;
+ inline void DiscardCompiledCodeForSerialization();
+
+ static constexpr bool TypeSupportsCaptures(Type t) {
return t == IRREGEXP || t == EXPERIMENTAL;
}
- // Maximum number of captures allowed.
- static constexpr int kMaxCaptures = 1 << 16;
-
- // Number of captures (without the match itself).
- inline int CaptureCount() const;
// Each capture (including the match itself) needs two registers.
- static int RegistersForCaptureCount(int count) { return (count + 1) * 2; }
-
- inline int MaxRegisterCount() const;
- inline Flags GetFlags() const;
- inline String Pattern();
- inline String EscapedPattern();
- inline Object CaptureNameMap();
- inline Object DataAt(int index) const;
- // Set implementation data after the object has been prepared.
- inline void SetDataAt(int index, Object value);
- inline void SetCaptureNameMap(Handle<FixedArray> capture_name_map);
+ static constexpr int RegistersForCaptureCount(int count) {
+ return (count + 1) * 2;
+ }
static constexpr int code_index(bool is_latin1) {
return is_latin1 ? kIrregexpLatin1CodeIndex : kIrregexpUC16CodeIndex;
@@ -135,17 +148,6 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
: kIrregexpUC16BytecodeIndex;
}
- // This could be a Smi kUninitializedValue or Code.
- V8_EXPORT_PRIVATE Object Code(bool is_latin1) const;
- // This could be a Smi kUninitializedValue or ByteArray.
- V8_EXPORT_PRIVATE Object Bytecode(bool is_latin1) const;
-
- bool ShouldProduceBytecode();
- inline bool HasCompiledCode() const;
- inline void DiscardCompiledCodeForSerialization();
-
- uint32_t BacktrackLimit() const;
-
// Dispatched behavior.
DECL_PRINTER(JSRegExp)
DECL_VERIFIER(JSRegExp)
@@ -158,59 +160,49 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static constexpr int kInitialLastIndexValue = 0;
// Indices in the data array.
- static const int kTagIndex = 0;
- static const int kSourceIndex = kTagIndex + 1;
- static const int kFlagsIndex = kSourceIndex + 1;
- static const int kDataIndex = kFlagsIndex + 1;
-
- // TODO(jgruber): Rename kDataIndex to something more appropriate.
- // There is no 'data' field, kDataIndex is just a marker for the
- // first non-generic index.
- static constexpr int kMinDataArrayLength = kDataIndex;
+ static constexpr int kTagIndex = 0;
+ static constexpr int kSourceIndex = kTagIndex + 1;
+ static constexpr int kFlagsIndex = kSourceIndex + 1;
+ static constexpr int kFirstTypeSpecificIndex = kFlagsIndex + 1;
+ static constexpr int kMinDataArrayLength = kFirstTypeSpecificIndex;
// The data fields are used in different ways depending on the
// value of the tag.
// Atom regexps (literal strings).
- static const int kAtomPatternIndex = kDataIndex;
-
- static const int kAtomDataSize = kAtomPatternIndex + 1;
-
- // Irregexp compiled code or trampoline to interpreter for Latin1. If
- // compilation fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpLatin1CodeIndex = kDataIndex;
- // Irregexp compiled code or trampoline to interpreter for UC16. If
- // compilation fails, this fields hold an exception object that should be
- // thrown if the regexp is used again.
- static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
- // Bytecode to interpret the regexp for Latin1. Contains kUninitializedValue
- // if we haven't compiled the regexp yet, regexp are always compiled or if
- // tier-up has happened (i.e. when kIrregexpLatin1CodeIndex contains native
- // irregexp code).
- static const int kIrregexpLatin1BytecodeIndex = kDataIndex + 2;
- // Bytecode to interpret the regexp for UC16. Contains kUninitializedValue if
- // we haven't compiled the regxp yet, regexp are always compiled or if tier-up
- // has happened (i.e. when kIrregexpUC16CodeIndex contains native irregexp
- // code).
- static const int kIrregexpUC16BytecodeIndex = kDataIndex + 3;
+ static constexpr int kAtomPatternIndex = kFirstTypeSpecificIndex;
+ static constexpr int kAtomDataSize = kAtomPatternIndex + 1;
+
+ // A Code object or a Smi marker value equal to kUninitializedValue.
+ static constexpr int kIrregexpLatin1CodeIndex = kFirstTypeSpecificIndex;
+ static constexpr int kIrregexpUC16CodeIndex = kIrregexpLatin1CodeIndex + 1;
+ // A ByteArray object or a Smi marker value equal to kUninitializedValue.
+ static constexpr int kIrregexpLatin1BytecodeIndex =
+ kIrregexpUC16CodeIndex + 1;
+ static constexpr int kIrregexpUC16BytecodeIndex =
+ kIrregexpLatin1BytecodeIndex + 1;
// Maximal number of registers used by either Latin1 or UC16.
// Only used to check that there is enough stack space
- static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
+ static constexpr int kIrregexpMaxRegisterCountIndex =
+ kIrregexpUC16BytecodeIndex + 1;
// Number of captures in the compiled regexp.
- static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
+ static constexpr int kIrregexpCaptureCountIndex =
+ kIrregexpMaxRegisterCountIndex + 1;
// Maps names of named capture groups (at indices 2i) to their corresponding
// (1-based) capture group indices (at indices 2i + 1).
- static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6;
+ static constexpr int kIrregexpCaptureNameMapIndex =
+ kIrregexpCaptureCountIndex + 1;
// Tier-up ticks are set to the value of the tier-up ticks flag. The value is
// decremented on each execution of the bytecode, so that the tier-up
// happens once the ticks reach zero.
// This value is ignored if the regexp-tier-up flag isn't turned on.
- static const int kIrregexpTicksUntilTierUpIndex = kDataIndex + 7;
+ static constexpr int kIrregexpTicksUntilTierUpIndex =
+ kIrregexpCaptureNameMapIndex + 1;
// A smi containing either the backtracking limit or kNoBacktrackLimit.
// TODO(jgruber): If needed, this limit could be packed into other fields
// above to save space.
- static const int kIrregexpBacktrackLimit = kDataIndex + 8;
- static const int kIrregexpDataSize = kDataIndex + 9;
+ static constexpr int kIrregexpBacktrackLimit =
+ kIrregexpTicksUntilTierUpIndex + 1;
+ static constexpr int kIrregexpDataSize = kIrregexpBacktrackLimit + 1;
// TODO(mbid,v8:10765): At the moment the EXPERIMENTAL data array conforms
// to the format of an IRREGEXP data array, with most fields set to some
@@ -222,27 +214,39 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static constexpr int kExperimentalDataSize = kIrregexpDataSize;
// In-object fields.
- static const int kLastIndexFieldIndex = 0;
- static const int kInObjectFieldCount = 1;
+ static constexpr int kLastIndexFieldIndex = 0;
+ static constexpr int kInObjectFieldCount = 1;
// The actual object size including in-object fields.
- static int Size() { return kHeaderSize + kInObjectFieldCount * kTaggedSize; }
+ static constexpr int Size() {
+ return kHeaderSize + kInObjectFieldCount * kTaggedSize;
+ }
// Descriptor array index to important methods in the prototype.
- static const int kExecFunctionDescriptorIndex = 1;
- static const int kSymbolMatchFunctionDescriptorIndex = 14;
- static const int kSymbolMatchAllFunctionDescriptorIndex = 15;
- static const int kSymbolReplaceFunctionDescriptorIndex = 16;
- static const int kSymbolSearchFunctionDescriptorIndex = 17;
- static const int kSymbolSplitFunctionDescriptorIndex = 18;
+ static constexpr int kExecFunctionDescriptorIndex = 1;
+ static constexpr int kSymbolMatchFunctionDescriptorIndex = 14;
+ static constexpr int kSymbolMatchAllFunctionDescriptorIndex = 15;
+ static constexpr int kSymbolReplaceFunctionDescriptorIndex = 16;
+ static constexpr int kSymbolSearchFunctionDescriptorIndex = 17;
+ static constexpr int kSymbolSplitFunctionDescriptorIndex = 18;
// The uninitialized value for a regexp code object.
- static const int kUninitializedValue = -1;
+ static constexpr int kUninitializedValue = -1;
+
+ // If the backtrack limit is set to this marker value, no limit is applied.
+ static constexpr uint32_t kNoBacktrackLimit = 0;
// The heuristic value for the length of the subject string for which we
// tier-up to the compiler immediately, instead of using the interpreter.
static constexpr int kTierUpForSubjectLengthValue = 1000;
+ // Maximum number of captures allowed.
+ static constexpr int kMaxCaptures = 1 << 16;
+
+ private:
+ inline Object DataAt(int index) const;
+ inline void SetDataAt(int index, Object value);
+
TQ_OBJECT_CONSTRUCTORS(JSRegExp)
};
@@ -262,17 +266,17 @@ class JSRegExpResult
// instance type as JSArray.
// Indices of in-object properties.
- static const int kIndexIndex = 0;
- static const int kInputIndex = 1;
- static const int kGroupsIndex = 2;
+ static constexpr int kIndexIndex = 0;
+ static constexpr int kInputIndex = 1;
+ static constexpr int kGroupsIndex = 2;
// Private internal only fields.
- static const int kNamesIndex = 3;
- static const int kRegExpInputIndex = 4;
- static const int kRegExpLastIndex = 5;
- static const int kInObjectPropertyCount = 6;
+ static constexpr int kNamesIndex = 3;
+ static constexpr int kRegExpInputIndex = 4;
+ static constexpr int kRegExpLastIndex = 5;
+ static constexpr int kInObjectPropertyCount = 6;
- static const int kMapIndexInContext = Context::REGEXP_RESULT_MAP_INDEX;
+ static constexpr int kMapIndexInContext = Context::REGEXP_RESULT_MAP_INDEX;
TQ_OBJECT_CONSTRUCTORS(JSRegExpResult)
};
@@ -284,8 +288,8 @@ class JSRegExpResultWithIndices
static_assert(
JSRegExpResult::kInObjectPropertyCount == 6,
"JSRegExpResultWithIndices must be a subclass of JSRegExpResult");
- static const int kIndicesIndex = 6;
- static const int kInObjectPropertyCount = 7;
+ static constexpr int kIndicesIndex = 6;
+ static constexpr int kInObjectPropertyCount = 7;
TQ_OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices)
};
@@ -305,11 +309,11 @@ class JSRegExpResultIndices
Handle<Object> maybe_names);
// Indices of in-object properties.
- static const int kGroupsIndex = 0;
- static const int kInObjectPropertyCount = 1;
+ static constexpr int kGroupsIndex = 0;
+ static constexpr int kInObjectPropertyCount = 1;
// Descriptor index of groups.
- static const int kGroupsDescriptorIndex = 1;
+ static constexpr int kGroupsDescriptorIndex = 1;
TQ_OBJECT_CONSTRUCTORS(JSRegExpResultIndices)
};
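The renamed accessors above (type_tag, capture_count, capture_name_map, and friends) expose the same data-array slots as before; the capture name map is the structure that backs named groups in match results. A minimal JavaScript sketch of the feature it serves (illustration only):

    const m = /(?<year>\d{4})-(?<month>\d{2})/.exec('2021-09');
    m.groups.year;   // "2021"
    m.groups.month;  // "09"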
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 34db9ad1bf..d6a65d95ca 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -17,7 +17,9 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/decimfmt.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
@@ -78,8 +80,7 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
Handle<JSReceiver> options;
const char* service = "Intl.RelativeTimeFormat";
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options,
- Intl::CoerceOptionsToObject(isolate, input_options, service),
+ isolate, options, CoerceOptionsToObject(isolate, input_options, service),
JSRelativeTimeFormat);
// 4. Let opt be a new Record.
@@ -147,7 +148,7 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// 16. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
+ Maybe<Style> maybe_style = GetStringOption<Style>(
isolate, options, "style", service, {"long", "short", "narrow"},
{Style::LONG, Style::SHORT, Style::NARROW}, Style::LONG);
MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
@@ -157,7 +158,7 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
// 18. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
- Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
+ Maybe<Numeric> maybe_numeric = GetStringOption<Numeric>(
isolate, options, "numeric", service, {"always", "auto"},
{Numeric::ALWAYS, Numeric::AUTO}, Numeric::ALWAYS);
MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
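The "style" and "numeric" options read above behave as before; only the helper namespace changes. A minimal JavaScript sketch (illustration only):

    new Intl.RelativeTimeFormat('en', { numeric: 'auto' }).format(-1, 'day');    // "yesterday"
    new Intl.RelativeTimeFormat('en', { numeric: 'always' }).format(-1, 'day');  // "1 day ago"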
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
index ff10303dbb..4fa3f173cc 100644
--- a/deps/v8/src/objects/js-segment-iterator.cc
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -17,7 +17,7 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segments.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 386150613a..be04f14052 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -16,8 +16,9 @@
#include "src/heap/factory.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-segmenter-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "unicode/brkiter.h"
namespace v8 {
@@ -36,9 +37,9 @@ MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
Handle<JSReceiver> options;
const char* service = "Intl.Segmenter";
// 5. Let options be GetOptionsObject(_options_).
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, options, Intl::GetOptionsObject(isolate, input_options, service),
- JSSegmenter);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ GetOptionsObject(isolate, input_options, service),
+ JSSegmenter);
// 7. Let opt be a new Record.
// 8. Let matcher be ? GetOption(options, "localeMatcher", "string",
@@ -68,7 +69,7 @@ MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
// 13. Let granularity be ? GetOption(options, "granularity", "string", «
// "grapheme", "word", "sentence" », "grapheme").
- Maybe<Granularity> maybe_granularity = Intl::GetStringOption<Granularity>(
+ Maybe<Granularity> maybe_granularity = GetStringOption<Granularity>(
isolate, options, "granularity", service,
{"grapheme", "word", "sentence"},
{Granularity::GRAPHEME, Granularity::WORD, Granularity::SENTENCE},
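The "granularity" option read above selects grapheme, word, or sentence segmentation. A minimal JavaScript sketch (illustration only):

    const words = new Intl.Segmenter('en', { granularity: 'word' }).segment('Hello, world!');
    [...words].filter(s => s.isWordLike).map(s => s.segment);  // ["Hello", "world"]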
diff --git a/deps/v8/src/objects/js-segments.cc b/deps/v8/src/objects/js-segments.cc
index ec3f8f6a2c..84d8197e57 100644
--- a/deps/v8/src/objects/js-segments.cc
+++ b/deps/v8/src/objects/js-segments.cc
@@ -18,7 +18,7 @@
#include "src/objects/js-segment-iterator.h"
#include "src/objects/js-segmenter-inl.h"
#include "src/objects/js-segments-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index b2dc41b570..57f765b282 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -71,7 +71,6 @@ class JSFinalizationRegistry
// Internal object for storing weak references in JSFinalizationRegistry.
class WeakCell : public TorqueGeneratedWeakCell<WeakCell, HeapObject> {
public:
- DECL_PRINTER(WeakCell)
EXPORT_DECL_VERIFIER(WeakCell)
class BodyDescriptor;
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index 815d9ac504..acd94fcf86 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -405,7 +405,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Object key = descriptors->GetKey(i);
if (key.IsSymbol()) continue;
keys->set(index, key);
- if (details.location() != kField) fields_only = false;
+ if (details.location() != PropertyLocation::kField) fields_only = false;
index++;
}
DCHECK_EQ(index, keys->length());
@@ -422,7 +422,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Object key = descriptors->GetKey(i);
if (key.IsSymbol()) continue;
DCHECK_EQ(kData, details.kind());
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
indices->set(index, Smi::FromInt(field_index.GetLoadByFieldIndex()));
index++;
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index a20347c4a7..242b3a6469 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -80,7 +80,6 @@ class RegExpBoilerplateDescription
RegExpBoilerplateDescription, Struct> {
public:
// Dispatched behavior.
- DECL_PRINTER(RegExpBoilerplateDescription)
void BriefPrintDetails(std::ostream& os);
private:
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index eb09aa6031..b53bbeb0e7 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -889,7 +889,7 @@ Handle<Object> LookupIterator::FetchValue(
result = holder_->property_dictionary(isolate_).ValueAt(
isolate_, dictionary_entry());
}
- } else if (property_details_.location() == kField) {
+ } else if (property_details_.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, property_details_.kind());
#if V8_ENABLE_WEBASSEMBLY
if (V8_UNLIKELY(holder_->IsWasmObject(isolate_))) {
@@ -932,7 +932,7 @@ Handle<Object> LookupIterator::FetchValue(
bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
DCHECK(!IsElement(*holder_));
DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
if (value.IsUninitialized(isolate())) {
// Storing uninitialized value means that we are preparing for a computed
@@ -1004,7 +1004,7 @@ bool LookupIterator::IsConstDictValueEqualTo(Object value) const {
int LookupIterator::GetFieldDescriptorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
DCHECK_EQ(kData, property_details_.kind());
// TODO(jkummerow): Propagate InternalIndex further.
return descriptor_number().as_int();
@@ -1013,7 +1013,7 @@ int LookupIterator::GetFieldDescriptorIndex() const {
int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, property_details_.location());
DCHECK_EQ(kAccessor, property_details_.kind());
return descriptor_number().as_int();
}
@@ -1021,7 +1021,7 @@ int LookupIterator::GetAccessorIndex() const {
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
- DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kField, property_details_.location());
DCHECK(!IsElement(*holder_));
return FieldIndex::ForDescriptor(holder_->map(isolate_), descriptor_number());
}
@@ -1062,7 +1062,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
accessor->Set(object, number_, *value);
} else if (holder->HasFastProperties(isolate_)) {
DCHECK(holder->IsJSObject(isolate_));
- if (property_details_.location() == kField) {
+ if (property_details_.location() == PropertyLocation::kField) {
// Check that in case of VariableMode::kConst field the existing value is
// equal to |value|.
DCHECK_IMPLIES(!initializing_store && property_details_.constness() ==
@@ -1071,7 +1071,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
JSObject::cast(*holder).WriteToField(descriptor_number(),
property_details_, *value);
} else {
- DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject(isolate_)) {
@@ -1507,7 +1507,8 @@ ConcurrentLookupIterator::Result ConcurrentLookupIterator::TryGetOwnChar(
uint16_t charcode;
{
SharedStringAccessGuardIfNeeded access_guard(local_isolate);
- charcode = string.Get(static_cast<int>(index));
+ charcode = string.Get(static_cast<int>(index), PtrComprCageBase(isolate),
+ access_guard);
}
if (charcode > unibrow::Latin1::kMaxChar) return kGaveUp;
diff --git a/deps/v8/src/objects/managed-inl.h b/deps/v8/src/objects/managed-inl.h
new file mode 100644
index 0000000000..a8a54e18c0
--- /dev/null
+++ b/deps/v8/src/objects/managed-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MANAGED_INL_H_
+#define V8_OBJECTS_MANAGED_INL_H_
+
+#include "src/handles/global-handles-inl.h"
+#include "src/objects/managed.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+template <class CppType>
+template <typename... Args>
+Handle<Managed<CppType>> Managed<CppType>::Allocate(Isolate* isolate,
+ size_t estimated_size,
+ Args&&... args) {
+ return FromSharedPtr(isolate, estimated_size,
+ std::make_shared<CppType>(std::forward<Args>(args)...));
+}
+
+// static
+template <class CppType>
+Handle<Managed<CppType>> Managed<CppType>::FromRawPtr(Isolate* isolate,
+ size_t estimated_size,
+ CppType* ptr) {
+ return FromSharedPtr(isolate, estimated_size, std::shared_ptr<CppType>{ptr});
+}
+
+// static
+template <class CppType>
+Handle<Managed<CppType>> Managed<CppType>::FromUniquePtr(
+ Isolate* isolate, size_t estimated_size,
+ std::unique_ptr<CppType> unique_ptr) {
+ return FromSharedPtr(isolate, estimated_size, std::move(unique_ptr));
+}
+
+// static
+template <class CppType>
+Handle<Managed<CppType>> Managed<CppType>::FromSharedPtr(
+ Isolate* isolate, size_t estimated_size,
+ std::shared_ptr<CppType> shared_ptr) {
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(estimated_size);
+ auto destructor = new ManagedPtrDestructor(
+ estimated_size, new std::shared_ptr<CppType>{std::move(shared_ptr)},
+ Destructor);
+ Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(destructor)));
+ Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
+ destructor->global_handle_location_ = global_handle.location();
+ GlobalHandles::MakeWeak(destructor->global_handle_location_, destructor,
+ &ManagedObjectFinalizer,
+ v8::WeakCallbackType::kParameter);
+ isolate->RegisterManagedPtrDestructor(destructor);
+ return handle;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_MANAGED_INL_H_
diff --git a/deps/v8/src/objects/managed.cc b/deps/v8/src/objects/managed.cc
index 8376ccb547..8853fb95d2 100644
--- a/deps/v8/src/objects/managed.cc
+++ b/deps/v8/src/objects/managed.cc
@@ -4,6 +4,8 @@
#include "src/objects/managed.h"
+#include "src/handles/global-handles-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/managed.h b/deps/v8/src/objects/managed.h
index 8d56a13aef..b681230ba2 100644
--- a/deps/v8/src/objects/managed.h
+++ b/deps/v8/src/objects/managed.h
@@ -7,7 +7,6 @@
#include <memory>
#include "src/execution/isolate.h"
-#include "src/handles/global-handles.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/objects/foreign.h"
@@ -65,49 +64,25 @@ class Managed : public Foreign {
template <typename... Args>
static Handle<Managed<CppType>> Allocate(Isolate* isolate,
size_t estimated_size,
- Args&&... args) {
- return FromSharedPtr(
- isolate, estimated_size,
- std::make_shared<CppType>(std::forward<Args>(args)...));
- }
+ Args&&... args);
// Create a {Managed<CppType>} from an existing raw {CppType*}. The returned
// object will now own the memory pointed to by {CppType}.
static Handle<Managed<CppType>> FromRawPtr(Isolate* isolate,
size_t estimated_size,
- CppType* ptr) {
- return FromSharedPtr(isolate, estimated_size,
- std::shared_ptr<CppType>{ptr});
- }
+ CppType* ptr);
// Create a {Managed<CppType>} from an existing {std::unique_ptr<CppType>}.
// The returned object will now own the memory pointed to by {CppType}, and
// the unique pointer will be released.
static Handle<Managed<CppType>> FromUniquePtr(
Isolate* isolate, size_t estimated_size,
- std::unique_ptr<CppType> unique_ptr) {
- return FromSharedPtr(isolate, estimated_size, std::move(unique_ptr));
- }
+ std::unique_ptr<CppType> unique_ptr);
// Create a {Managed<CppType>} from an existing {std::shared_ptr<CppType>}.
static Handle<Managed<CppType>> FromSharedPtr(
Isolate* isolate, size_t estimated_size,
- std::shared_ptr<CppType> shared_ptr) {
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(estimated_size);
- auto destructor = new ManagedPtrDestructor(
- estimated_size, new std::shared_ptr<CppType>{std::move(shared_ptr)},
- Destructor);
- Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
- isolate->factory()->NewForeign(reinterpret_cast<Address>(destructor)));
- Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
- destructor->global_handle_location_ = global_handle.location();
- GlobalHandles::MakeWeak(destructor->global_handle_location_, destructor,
- &ManagedObjectFinalizer,
- v8::WeakCallbackType::kParameter);
- isolate->RegisterManagedPtrDestructor(destructor);
- return handle;
- }
+ std::shared_ptr<CppType> shared_ptr);
private:
// Internally this {Foreign} object stores a pointer to a new
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index dc37a119fa..c8eb400424 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -654,7 +654,8 @@ bool Map::CanBeDeprecated() const {
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.representation().MightCauseMapDeprecation()) return true;
- if (details.kind() == kData && details.location() == kDescriptor) {
+ if (details.kind() == kData &&
+ details.location() == PropertyLocation::kDescriptor) {
return true;
}
}
@@ -729,7 +730,7 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
set_may_have_interesting_symbols(true);
}
PropertyDetails details = desc->GetDetails();
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_GT(UnusedPropertyFields(), 0);
AccountAddedPropertyField();
}
@@ -737,7 +738,8 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
// This function does not support appending double field descriptors and
// it should never try to (otherwise, layout descriptor must be updated too).
#ifdef DEBUG
- DCHECK(details.location() != kField || !details.representation().IsDouble());
+ DCHECK(details.location() != PropertyLocation::kField ||
+ !details.representation().IsDouble());
#endif
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index ba7961a9ca..3bfd3922a3 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -130,20 +130,20 @@ PropertyDetails MapUpdater::GetDetails(InternalIndex descriptor) const {
Object MapUpdater::GetValue(InternalIndex descriptor) const {
DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
- DCHECK_EQ(kDescriptor, new_location_);
+ DCHECK_EQ(PropertyLocation::kDescriptor, new_location_);
return *new_value_;
}
- DCHECK_EQ(kDescriptor, GetDetails(descriptor).location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, GetDetails(descriptor).location());
return old_descriptors_->GetStrongValue(descriptor);
}
FieldType MapUpdater::GetFieldType(InternalIndex descriptor) const {
DCHECK(descriptor.is_found());
if (descriptor == modified_descriptor_) {
- DCHECK_EQ(kField, new_location_);
+ DCHECK_EQ(PropertyLocation::kField, new_location_);
return *new_field_type_;
}
- DCHECK_EQ(kField, GetDetails(descriptor).location());
+ DCHECK_EQ(PropertyLocation::kField, GetDetails(descriptor).location());
return old_descriptors_->GetFieldType(descriptor);
}
@@ -153,7 +153,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
DCHECK(descriptor.is_found());
// |location| is just a pre-fetched GetDetails(descriptor).location().
DCHECK_EQ(location, GetDetails(descriptor).location());
- if (location == kField) {
+ if (location == PropertyLocation::kField) {
return handle(GetFieldType(descriptor), isolate_);
} else {
return GetValue(descriptor).OptimalType(isolate_, representation);
@@ -165,7 +165,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
PropertyLocation location, Representation representation) {
// |location| is just a pre-fetched GetDetails(descriptor).location().
DCHECK_EQ(descriptors->GetDetails(descriptor).location(), location);
- if (location == kField) {
+ if (location == PropertyLocation::kField) {
return handle(descriptors->GetFieldType(descriptor), isolate_);
} else {
return descriptors->GetStrongValue(descriptor)
@@ -188,7 +188,7 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
modified_descriptor_ = descriptor;
new_kind_ = kData;
new_attributes_ = attributes;
- new_location_ = kField;
+ new_location_ = PropertyLocation::kField;
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
@@ -460,7 +460,7 @@ MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
DCHECK_EQ(new_kind_, old_details.kind());
DCHECK_EQ(new_attributes_, old_details.attributes());
- DCHECK_EQ(kField, old_details.location());
+ DCHECK_EQ(PropertyLocation::kField, old_details.location());
if (FLAG_trace_generalization) {
PrintGeneralization(
isolate_, old_map_, stdout, "uninitialized field", modified_descriptor_,
@@ -581,7 +581,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
old_details.attributes() != new_attributes_) {
return Normalize("Normalize_RootModification1");
}
- if (old_details.location() != kField) {
+ if (old_details.location() != PropertyLocation::kField) {
return Normalize("Normalize_RootModification2");
}
if (!new_representation_.fits_into(old_details.representation())) {
@@ -590,7 +590,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
DCHECK_EQ(kData, old_details.kind());
DCHECK_EQ(kData, new_kind_);
- DCHECK_EQ(kField, new_location_);
+ DCHECK_EQ(PropertyLocation::kField, new_location_);
// Modify root map in-place. The GeneralizeField method is a no-op
// if the {old_map_} is already general enough to hold the requested
@@ -645,7 +645,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
tmp_representation = generalized;
}
- if (tmp_details.location() == kField) {
+ if (tmp_details.location() == PropertyLocation::kField) {
Handle<FieldType> old_field_type =
GetOrComputeFieldType(i, old_details.location(), tmp_representation);
GeneralizeField(tmp_map, i, old_details.constness(), tmp_representation,
@@ -676,12 +676,12 @@ MapUpdater::State MapUpdater::FindTargetMap() {
DCHECK(IsGeneralizableTo(new_constness_, details.constness()));
DCHECK_EQ(new_location_, details.location());
DCHECK(new_representation_.fits_into(details.representation()));
- if (new_location_ == kField) {
- DCHECK_EQ(kField, details.location());
+ if (new_location_ == PropertyLocation::kField) {
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK(new_field_type_->NowIs(
target_descriptors.GetFieldType(modified_descriptor_)));
} else {
- DCHECK(details.location() == kField ||
+ DCHECK(details.location() == PropertyLocation::kField ||
EqualImmutableValues(
*new_value_,
target_descriptors.GetStrongValue(modified_descriptor_)));
@@ -766,7 +766,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
int current_offset = 0;
for (InternalIndex i : InternalIndex::Range(root_nof)) {
PropertyDetails old_details = old_descriptors_->GetDetails(i);
- if (old_details.location() == kField) {
+ if (old_details.location() == PropertyLocation::kField) {
current_offset += old_details.field_width_in_words();
}
Descriptor d(handle(GetKey(i), isolate_),
@@ -793,22 +793,22 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
// Note: failed values equality check does not invalidate per-object
// property constness.
PropertyLocation next_location =
- old_details.location() == kField ||
- target_details.location() == kField ||
+ old_details.location() == PropertyLocation::kField ||
+ target_details.location() == PropertyLocation::kField ||
!EqualImmutableValues(target_descriptors->GetStrongValue(i),
GetValue(i))
- ? kField
- : kDescriptor;
+ ? PropertyLocation::kField
+ : PropertyLocation::kDescriptor;
// Ensure that mutable values are stored in fields.
DCHECK_IMPLIES(next_constness == PropertyConstness::kMutable,
- next_location == kField);
+ next_location == PropertyLocation::kField);
Representation next_representation =
old_details.representation().generalize(
target_details.representation());
- if (next_location == kField) {
+ if (next_location == PropertyLocation::kField) {
Handle<FieldType> old_field_type =
GetOrComputeFieldType(i, old_details.location(), next_representation);
@@ -837,7 +837,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
current_offset += d.GetDetails().field_width_in_words();
new_descriptors->Set(i, &d);
} else {
- DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(PropertyLocation::kDescriptor, next_location);
DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
@@ -860,7 +860,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
Representation next_representation = old_details.representation();
Descriptor d;
- if (next_location == kField) {
+ if (next_location == PropertyLocation::kField) {
Handle<FieldType> next_field_type =
GetOrComputeFieldType(i, old_details.location(), next_representation);
@@ -885,7 +885,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
current_offset += d.GetDetails().field_width_in_words();
new_descriptors->Set(i, &d);
} else {
- DCHECK_EQ(kDescriptor, next_location);
+ DCHECK_EQ(PropertyLocation::kDescriptor, next_location);
DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
@@ -924,7 +924,7 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
if (details.location() != next_details.location()) break;
if (!details.representation().Equals(next_details.representation())) break;
- if (next_details.location() == kField) {
+ if (next_details.location() == PropertyLocation::kField) {
FieldType next_field_type = next_descriptors.GetFieldType(i);
if (!descriptors->GetFieldType(i).NowIs(next_field_type)) {
break;
@@ -981,14 +981,14 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
MaybeHandle<FieldType> new_field_type;
MaybeHandle<Object> old_value;
MaybeHandle<Object> new_value;
- if (old_details.location() == kField) {
+ if (old_details.location() == PropertyLocation::kField) {
old_field_type = handle(
old_descriptors_->GetFieldType(modified_descriptor_), isolate_);
} else {
old_value = handle(old_descriptors_->GetStrongValue(modified_descriptor_),
isolate_);
}
- if (new_details.location() == kField) {
+ if (new_details.location() == PropertyLocation::kField) {
new_field_type =
handle(new_descriptors->GetFieldType(modified_descriptor_), isolate_);
} else {
@@ -999,7 +999,8 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
PrintGeneralization(
isolate_, old_map_, stdout, "", modified_descriptor_, split_nof,
old_nof_,
- old_details.location() == kDescriptor && new_location_ == kField,
+ old_details.location() == PropertyLocation::kDescriptor &&
+ new_location_ == PropertyLocation::kField,
old_details.representation(), new_details.representation(),
old_details.constness(), new_details.constness(), old_field_type,
old_value, new_field_type, new_value);
@@ -1099,7 +1100,7 @@ void MapUpdater::UpdateFieldType(Isolate* isolate, Handle<Map> map,
DisallowGarbageCollection no_gc;
PropertyDetails details =
map->instance_descriptors(isolate).GetDetails(descriptor);
- if (details.location() != kField) return;
+ if (details.location() != PropertyLocation::kField) return;
DCHECK_EQ(kData, details.kind());
if (new_constness != details.constness() && map->is_prototype_map()) {
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
index c5b425764a..6f022e1d39 100644
--- a/deps/v8/src/objects/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -230,7 +230,7 @@ class V8_EXPORT_PRIVATE MapUpdater {
PropertyKind new_kind_ = kData;
PropertyAttributes new_attributes_ = NONE;
PropertyConstness new_constness_ = PropertyConstness::kMutable;
- PropertyLocation new_location_ = kField;
+ PropertyLocation new_location_ = PropertyLocation::kField;
Representation new_representation_ = Representation::None();
// Data specific to kField location.
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index a8fdce3189..0610e59688 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -25,7 +25,6 @@
#include "src/roots/roots.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
-#include "torque-generated/field-offsets.h"
namespace v8 {
namespace internal {
@@ -206,6 +205,7 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitJSDataView;
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
@@ -502,7 +502,8 @@ int Map::NumberOfFields(ConcurrencyMode cmode) const {
: instance_descriptors();
int result = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
- if (descriptors.GetDetails(i).location() == kField) result++;
+ if (descriptors.GetDetails(i).location() == PropertyLocation::kField)
+ result++;
}
return result;
}
@@ -513,7 +514,7 @@ Map::FieldCounts Map::GetFieldCounts() const {
int const_count = 0;
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
switch (details.constness()) {
case PropertyConstness::kMutable:
mutable_count++;
@@ -599,9 +600,10 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, InternalIndex descriptor) const {
DisallowGarbageCollection no_gc;
- DCHECK_EQ(kField, instance_descriptors(isolate, kRelaxedLoad)
- .GetDetails(descriptor)
- .location());
+ DCHECK_EQ(PropertyLocation::kField,
+ instance_descriptors(isolate, kRelaxedLoad)
+ .GetDetails(descriptor)
+ .location());
Map result = *this;
while (true) {
Object back = result.GetBackPointer(isolate);
@@ -635,7 +637,8 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
DescriptorArray old_descriptors = old_map.instance_descriptors(isolate);
for (InternalIndex i : old_map.IterateOwnDescriptors()) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
- if (old_details.location() == kField && old_details.kind() == kData) {
+ if (old_details.location() == PropertyLocation::kField &&
+ old_details.kind() == kData) {
FieldType old_type = old_descriptors.GetFieldType(i);
if (Map::FieldTypeIsCleared(old_details.representation(), old_type)) {
return Map();
@@ -708,7 +711,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
if (!old_details.representation().fits_into(new_details.representation())) {
return Map();
}
- if (new_details.location() == kField) {
+ if (new_details.location() == PropertyLocation::kField) {
if (new_details.kind() == kData) {
FieldType new_type = new_descriptors.GetFieldType(i);
// Cleared field types need special treatment. They represent lost
@@ -717,7 +720,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
return Map();
}
DCHECK_EQ(kData, old_details.kind());
- DCHECK_EQ(kField, old_details.location());
+ DCHECK_EQ(PropertyLocation::kField, old_details.location());
FieldType old_type = old_descriptors.GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type) ||
!old_type.NowIs(new_type)) {
@@ -732,8 +735,8 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
UNREACHABLE();
}
} else {
- DCHECK_EQ(kDescriptor, new_details.location());
- if (old_details.location() == kField ||
+ DCHECK_EQ(PropertyLocation::kDescriptor, new_details.location());
+ if (old_details.location() == PropertyLocation::kField ||
old_descriptors.GetStrongValue(i) !=
new_descriptors.GetStrongValue(i)) {
return Map();
@@ -1061,7 +1064,7 @@ int Map::NextFreePropertyIndex() const {
// Search properties backwards to find the last field.
for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(InternalIndex(i));
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
return details.field_index() + details.field_width_in_words();
}
}
@@ -1479,7 +1482,7 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
new_descriptor.as_int() + 1);
child->CopyUnusedPropertyFields(*parent);
PropertyDetails details = descriptors->GetDetails(new_descriptor);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
child->AccountAddedPropertyField();
}
@@ -1712,7 +1715,7 @@ namespace {
bool CanHoldValue(DescriptorArray descriptors, InternalIndex descriptor,
PropertyConstness constness, Object value) {
PropertyDetails details = descriptors.GetDetails(descriptor);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
return IsGeneralizableTo(constness, details.constness()) &&
value.FitsRepresentation(details.representation()) &&
@@ -1723,7 +1726,7 @@ bool CanHoldValue(DescriptorArray descriptors, InternalIndex descriptor,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(PropertyConstness::kConst, details.constness());
DCHECK_EQ(kAccessor, details.kind());
return false;
@@ -2009,8 +2012,9 @@ Handle<Map> Map::CopyReplaceDescriptor(Isolate* isolate, Handle<Map> map,
DCHECK_EQ(*key, descriptors->GetKey(insertion_index));
// This function does not support replacing property fields as
// that would break property field counters.
- DCHECK_NE(kField, descriptor->GetDetails().location());
- DCHECK_NE(kField, descriptors->GetDetails(insertion_index).location());
+ DCHECK_NE(PropertyLocation::kField, descriptor->GetDetails().location());
+ DCHECK_NE(PropertyLocation::kField,
+ descriptors->GetDetails(insertion_index).location());
Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
isolate, descriptors, map->NumberOfOwnDescriptors());
@@ -2089,7 +2093,7 @@ bool Map::EquivalentToForElementsKindTransition(const Map other,
: instance_descriptors();
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK(IsMostGeneralFieldType(details.representation(),
descriptors.GetFieldType(i)));
}
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index e649405091..d60890d910 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -12,7 +12,7 @@
#include "src/objects/internal-index.h"
#include "src/objects/objects.h"
#include "torque-generated/bit-fields.h"
-#include "torque-generated/field-offsets.h"
+#include "torque-generated/visitor-lists.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/megadom-handler.tq b/deps/v8/src/objects/megadom-handler.tq
index abcfa583a5..2a76e7045c 100644
--- a/deps/v8/src/objects/megadom-handler.tq
+++ b/deps/v8/src/objects/megadom-handler.tq
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
@generateBodyDescriptor
extern class MegaDomHandler extends HeapObject {
accessor: MaybeObject;
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index f2869eadc7..8b1446373c 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -30,9 +30,6 @@ class Microtask : public TorqueGeneratedMicrotask<Microtask, Struct> {
class CallbackTask
: public TorqueGeneratedCallbackTask<CallbackTask, Microtask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(CallbackTask)
-
TQ_OBJECT_CONSTRUCTORS(CallbackTask)
};
@@ -43,7 +40,6 @@ class CallableTask
: public TorqueGeneratedCallableTask<CallableTask, Microtask> {
public:
// Dispatched behavior.
- DECL_PRINTER(CallableTask)
DECL_VERIFIER(CallableTask)
void BriefPrintDetails(std::ostream& os);
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 2945f36a14..110d67c888 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -17,6 +17,7 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/source-text-module.h"
#include "src/objects/synthetic-module-inl.h"
#include "src/utils/ostreams.h"
@@ -427,6 +428,44 @@ Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
return Just(it->property_attributes());
}
+// ES
+// https://tc39.es/ecma262/#sec-module-namespace-exotic-objects-defineownproperty-p-desc
+// static
+Maybe<bool> JSModuleNamespace::DefineOwnProperty(
+ Isolate* isolate, Handle<JSModuleNamespace> object, Handle<Object> key,
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw) {
+ // 1. If Type(P) is Symbol, return OrdinaryDefineOwnProperty(O, P, Desc).
+ if (key->IsSymbol()) {
+ return OrdinaryDefineOwnProperty(isolate, object, key, desc, should_throw);
+ }
+
+ // 2. Let current be ? O.[[GetOwnProperty]](P).
+ PropertyKey lookup_key(isolate, key);
+ LookupIterator it(isolate, object, lookup_key, LookupIterator::OWN);
+ PropertyDescriptor current;
+ Maybe<bool> has_own = GetOwnPropertyDescriptor(&it, &current);
+ MAYBE_RETURN(has_own, Nothing<bool>());
+
+ // 3. If current is undefined, return false.
+ // 4. If Desc.[[Configurable]] is present and has value true, return false.
+ // 5. If Desc.[[Enumerable]] is present and has value false, return false.
+ // 6. If ! IsAccessorDescriptor(Desc) is true, return false.
+ // 7. If Desc.[[Writable]] is present and has value false, return false.
+ // 8. If Desc.[[Value]] is present, return
+ // SameValue(Desc.[[Value]], current.[[Value]]).
+ if (!has_own.FromJust() ||
+ (desc->has_configurable() && desc->configurable()) ||
+ (desc->has_enumerable() && !desc->enumerable()) ||
+ PropertyDescriptor::IsAccessorDescriptor(desc) ||
+ (desc->has_writable() && !desc->writable()) ||
+ (desc->has_value() && !desc->value()->SameValue(*current.value()))) {
+ RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
+ NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+ }
+
+ return Just(true);
+}
+
bool Module::IsGraphAsync(Isolate* isolate) const {
DisallowGarbageCollection no_gc;
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 5cb7e4bb7f..208613e4c9 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -10,7 +10,6 @@
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -160,6 +159,10 @@ class JSModuleNamespace
static V8_WARN_UNUSED_RESULT Maybe<PropertyAttributes> GetPropertyAttributes(
LookupIterator* it);
+ static V8_WARN_UNUSED_RESULT Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSModuleNamespace> o, Handle<Object> key,
+ PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
+
// In-object fields.
enum {
kToStringTagFieldIndex,
diff --git a/deps/v8/src/objects/name.tq b/deps/v8/src/objects/name.tq
index 55f70d26b5..6fe141f90c 100644
--- a/deps/v8/src/objects/name.tq
+++ b/deps/v8/src/objects/name.tq
@@ -64,7 +64,7 @@ const kArrayIndexLengthBitsShift: uint32 =
kNofHashBitFields + kArrayIndexValueBits;
macro TenToThe(exponent: uint32): uint32 {
- assert(exponent <= 9);
+ dcheck(exponent <= 9);
let answer: int32 = 1;
for (let i: int32 = 0; i < Signed(exponent); i++) {
answer = answer * 10;
@@ -74,14 +74,14 @@ macro TenToThe(exponent: uint32): uint32 {
macro MakeArrayIndexHash(value: uint32, length: uint32): NameHash {
// This is in sync with StringHasher::MakeArrayIndexHash.
- assert(length <= kMaxArrayIndexSize);
+ dcheck(length <= kMaxArrayIndexSize);
const one: uint32 = 1;
- assert(TenToThe(kMaxCachedArrayIndexLength) < (one << kArrayIndexValueBits));
+ dcheck(TenToThe(kMaxCachedArrayIndexLength) < (one << kArrayIndexValueBits));
let hash: uint32 = value;
hash = (hash << kArrayIndexValueBitsShift) |
(length << kArrayIndexLengthBitsShift);
- assert((hash & kIsNotIntegerIndexMask) == 0);
- assert(
+ dcheck((hash & kIsNotIntegerIndexMask) == 0);
+ dcheck(
(length <= kMaxCachedArrayIndexLength) == ContainsCachedArrayIndex(hash));
return %RawDownCast<NameHash>(hash);
}
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index e5ba2684b2..51dc178f8b 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -270,6 +270,7 @@ class ZoneForwardList;
V(FreeSpaceOrFiller) \
V(FunctionContext) \
V(JSApiObject) \
+ V(JSClassConstructor) \
V(JSLastDummyApiObject) \
V(JSPromiseConstructor) \
V(JSArrayConstructor) \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 838b0536e2..44a11accdb 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -931,7 +931,7 @@ class CodeDataContainer::BodyDescriptor final : public BodyDescriptorBase {
CodeDataContainer::kPointerFieldsWeakEndOffset, v);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- v->VisitCodePointer(obj, obj.RawField(kCodeOffset));
+ v->VisitCodePointer(obj, obj.RawCodeField(kCodeOffset));
}
}
@@ -1114,6 +1114,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_STRING_ITERATOR_TYPE:
case JS_TYPED_ARRAY_PROTOTYPE_TYPE:
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index 6800db3b78..92452e43b0 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -18,6 +18,7 @@
#include "src/builtins/builtins.h"
#include "src/common/external-pointer-inl.h"
#include "src/common/globals.h"
+#include "src/common/ptr-compr-inl.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -59,7 +60,7 @@ Smi PropertyDetails::AsSmi() const {
}
int PropertyDetails::field_width_in_words() const {
- DCHECK_EQ(location(), kField);
+ DCHECK_EQ(location(), PropertyLocation::kField);
return 1;
}
@@ -648,6 +649,10 @@ MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
return MaybeObjectSlot(field_address(byte_offset));
}
+CodeObjectSlot HeapObject::RawCodeField(int byte_offset) const {
+ return CodeObjectSlot(field_address(byte_offset));
+}
+
MapWord MapWord::FromMap(const Map map) {
DCHECK(map.is_null() || !MapWord::IsPacked(map.ptr()));
#ifdef V8_MAP_PACKING
@@ -675,6 +680,22 @@ MapWord MapWord::FromForwardingAddress(HeapObject object) {
HeapObject MapWord::ToForwardingAddress() {
DCHECK(IsForwardingAddress());
+ HeapObject obj = HeapObject::FromAddress(value_);
+ // For objects allocated outside of the main pointer compression cage the
+ // variant with explicit cage base must be used.
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !obj.IsCode());
+ return obj;
+}
+
+HeapObject MapWord::ToForwardingAddress(PtrComprCageBase host_cage_base) {
+ DCHECK(IsForwardingAddress());
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ // Recompress value_ using proper host_cage_base since the map word
+ // has the upper 32 bits that correspond to the main cage base value.
+ Address value =
+ DecompressTaggedPointer(host_cage_base, CompressTagged(value_));
+ return HeapObject::FromAddress(value);
+ }
return HeapObject::FromAddress(value_);
}
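
The cage-base-aware ToForwardingAddress above recompresses the stored value so that its upper half comes from the host object's cage rather than the main cage. Stripped of V8 types, the arithmetic amounts to swapping the upper 32 bits, roughly as in this standalone sketch (the helper names are made up; with a 4 GB-aligned cage base, OR and add are interchangeable):

    #include <cstdint>

    using Address = uint64_t;
    using Tagged_t = uint32_t;

    // Keep only the low 32 bits (what compression conceptually does).
    Tagged_t Compress(Address value) { return static_cast<Tagged_t>(value); }

    // Rebuild a full pointer by taking the upper 32 bits from the cage base.
    Address Decompress(Address cage_base, Tagged_t compressed) {
      return (cage_base & 0xFFFFFFFF00000000ull) | compressed;
    }

    // Re-base a forwarding address whose upper bits were written relative to
    // the main cage onto the host object's cage.
    Address Rebase(Address stored_value, Address host_cage_base) {
      return Decompress(host_cage_base, Compress(stored_value));
    }
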
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 68482fe68f..db5a905f9c 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -2435,7 +2435,7 @@ void DescriptorArray::GeneralizeAllFields() {
for (InternalIndex i : InternalIndex::Range(length)) {
PropertyDetails details = GetDetails(i);
details = details.CopyWithRepresentation(Representation::Tagged());
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
details = details.CopyWithConstness(PropertyConstness::kMutable);
SetValue(i, MaybeObject::FromObject(FieldType::Any()));
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 61bcf79800..7cb94dfb74 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -646,7 +646,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
bool operator()(const Object a, const Object b) const { return a < b; }
};
- template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value,
int>::type = 0>
inline T ReadField(size_t offset) const {
// Pointer compression causes types larger than kTaggedSize to be unaligned.
@@ -663,7 +664,8 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
}
}
- template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value,
int>::type = 0>
inline void WriteField(size_t offset, T value) const {
// Pointer compression causes types larger than kTaggedSize to be unaligned.
@@ -785,8 +787,14 @@ class MapWord {
// Create a map word from a forwarding address.
static inline MapWord FromForwardingAddress(HeapObject object);
- // View this map word as a forwarding address.
+ // View this map word as a forwarding address. The parameterless version
+ // is allowed to be used for objects allocated in the main pointer compression
+ // cage, while the second variant uses the value of the cage base explicitly
+ // and thus can be used in situations where one has to deal with both cases.
+ // Note that the parameterless version is preferred because it avoids
+ // unnecessary recompressions.
inline HeapObject ToForwardingAddress();
+ inline HeapObject ToForwardingAddress(PtrComprCageBase host_cage_base);
inline Address ptr() { return value_; }
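
The widened enable_if on ReadField/WriteField above admits enum types alongside arithmetic ones, which matters now that fields such as PropertyLocation are scoped enums with no implicit conversion to int. A minimal standalone sketch of the same SFINAE pattern (the Location enum and raw-buffer accessor below are illustrative, not V8's actual accessors):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <type_traits>

    enum class Location : uint8_t { kField = 0, kDescriptor = 1 };

    template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
                                                   std::is_enum<T>::value,
                                               int>::type = 0>
    T ReadField(const unsigned char* base, size_t offset) {
      T value;
      std::memcpy(&value, base + offset, sizeof(T));  // tolerates unaligned data
      return value;
    }

    // ReadField<Location>(raw, 0) now compiles; with is_arithmetic alone it
    // would be rejected, since enum class types are not arithmetic types.
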
diff --git a/deps/v8/src/objects/option-utils.cc b/deps/v8/src/objects/option-utils.cc
new file mode 100644
index 0000000000..9e05b4a104
--- /dev/null
+++ b/deps/v8/src/objects/option-utils.cc
@@ -0,0 +1,172 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/option-utils.h"
+
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// ecma402/#sec-getoptionsobject
+MaybeHandle<JSReceiver> GetOptionsObject(Isolate* isolate,
+ Handle<Object> options,
+ const char* method_name) {
+ // 1. If options is undefined, then
+ if (options->IsUndefined(isolate)) {
+ // a. Return ! ObjectCreate(null).
+ return isolate->factory()->NewJSObjectWithNullProto();
+ }
+ // 2. If Type(options) is Object, then
+ if (options->IsJSReceiver()) {
+ // a. Return options.
+ return Handle<JSReceiver>::cast(options);
+ }
+ // 3. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidArgument),
+ JSReceiver);
+}
+
+// ecma402/#sec-coerceoptionstoobject
+MaybeHandle<JSReceiver> CoerceOptionsToObject(Isolate* isolate,
+ Handle<Object> options,
+ const char* method_name) {
+ // 1. If options is undefined, then
+ if (options->IsUndefined(isolate)) {
+ // a. Return ! ObjectCreate(null).
+ return isolate->factory()->NewJSObjectWithNullProto();
+ }
+ // 2. Return ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ Object::ToObject(isolate, options, method_name),
+ JSReceiver);
+ return Handle<JSReceiver>::cast(options);
+}
+
+Maybe<bool> GetStringOption(Isolate* isolate, Handle<JSReceiver> options,
+ const char* property,
+ std::vector<const char*> values,
+ const char* method_name,
+ std::unique_ptr<char[]>* result) {
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, options, property_str),
+ Nothing<bool>());
+
+ if (value->IsUndefined(isolate)) {
+ return Just(false);
+ }
+
+ // 2. c. Let value be ? ToString(value).
+ Handle<String> value_str;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value_str, Object::ToString(isolate, value), Nothing<bool>());
+ std::unique_ptr<char[]> value_cstr = value_str->ToCString();
+
+ // 2. d. if values is not undefined, then
+ if (values.size() > 0) {
+ // 2. d. i. If values does not contain an element equal to value,
+ // throw a RangeError exception.
+ for (size_t i = 0; i < values.size(); i++) {
+ if (strcmp(values.at(i), value_cstr.get()) == 0) {
+ // 2. e. return value
+ *result = std::move(value_cstr);
+ return Just(true);
+ }
+ }
+
+ Handle<String> method_str =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kValueOutOfRange, value, method_str,
+ property_str),
+ Nothing<bool>());
+ }
+
+ // 2. e. return value
+ *result = std::move(value_cstr);
+ return Just(true);
+}
+
+V8_WARN_UNUSED_RESULT Maybe<bool> GetBoolOption(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* property,
+ const char* method_name,
+ bool* result) {
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(isolate, options, property_str),
+ Nothing<bool>());
+
+ // 2. If value is not undefined, then
+ if (!value->IsUndefined(isolate)) {
+ // 2. b. i. Let value be ToBoolean(value).
+ *result = value->BooleanValue(isolate);
+
+ // 2. e. return value
+ return Just(true);
+ }
+
+ return Just(false);
+}
+
+// ecma402/#sec-defaultnumberoption
+Maybe<int> DefaultNumberOption(Isolate* isolate, Handle<Object> value, int min,
+ int max, int fallback, Handle<String> property) {
+ // 2. Else, return fallback.
+ if (value->IsUndefined()) return Just(fallback);
+
+ // 1. If value is not undefined, then
+ // a. Let value be ? ToNumber(value).
+ Handle<Object> value_num;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value_num, Object::ToNumber(isolate, value), Nothing<int>());
+ DCHECK(value_num->IsNumber());
+
+ // b. If value is NaN or less than minimum or greater than maximum, throw a
+ // RangeError exception.
+ if (value_num->IsNaN() || value_num->Number() < min ||
+ value_num->Number() > max) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange, property),
+ Nothing<int>());
+ }
+
+ // The max and min arguments are integers and the above check makes
+ // sure that we are within the integer range making this double to
+ // int conversion safe.
+ //
+ // c. Return floor(value).
+ return Just(FastD2I(floor(value_num->Number())));
+}
+
+// ecma402/#sec-getnumberoption
+Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ Handle<String> property, int min, int max,
+ int fallback) {
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, JSReceiver::GetProperty(isolate, options, property),
+ Nothing<int>());
+
+ // Return ? DefaultNumberOption(value, minimum, maximum, fallback).
+ return DefaultNumberOption(isolate, value, min, max, fallback, property);
+}
+
+} // namespace internal
+} // namespace v8
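
DefaultNumberOption above follows the ecma402 steps: undefined yields the fallback, NaN or an out-of-range number throws a RangeError, and anything else is floored to an int. With the Isolate plumbing removed, the numeric core reduces to roughly the following sketch (error signalling simplified to an optional):

    #include <cmath>
    #include <optional>

    // nullopt stands in for the RangeError the real helper throws.
    std::optional<int> DefaultNumberOptionSketch(std::optional<double> value,
                                                 int min, int max, int fallback) {
      if (!value.has_value()) return fallback;          // undefined -> fallback
      double num = *value;
      if (std::isnan(num) || num < min || num > max) {  // range check
        return std::nullopt;
      }
      return static_cast<int>(std::floor(num));         // floor(value)
    }
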
diff --git a/deps/v8/src/objects/option-utils.h b/deps/v8/src/objects/option-utils.h
new file mode 100644
index 0000000000..5bb2c35701
--- /dev/null
+++ b/deps/v8/src/objects/option-utils.h
@@ -0,0 +1,95 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OPTION_UTILS_H_
+#define V8_OBJECTS_OPTION_UTILS_H_
+
+#include "src/execution/isolate.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// ecma402/#sec-getoptionsobject and temporal/#sec-getoptionsobject
+V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> GetOptionsObject(
+ Isolate* isolate, Handle<Object> options, const char* method_name);
+
+// ecma402/#sec-coerceoptionstoobject
+V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> CoerceOptionsToObject(
+ Isolate* isolate, Handle<Object> options, const char* method_name);
+
+// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
+// ecma402/#sec-getoption and temporal/#sec-getoption
+//
+// This is specialized for the case when type is string.
+//
+// Instead of passing undefined for the values argument as the spec
+// defines, pass in an empty vector.
+//
+// Returns true if the options object has the property and stores the
+// result in value. Returns false if the value is not found. The
+// caller is required to use the fallback value appropriately in this
+// case.
+//
+// method_name is a string denoting the method the call came from; used when
+// printing the error message.
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<bool> GetStringOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ std::vector<const char*> values, const char* method_name,
+ std::unique_ptr<char[]>* result);
+
+// A helper template to read a string option into an enum.
+// Each entry in enum_values is the value that corresponds to the string at
+// the same index in str_values. If the option does not contain name,
+// default_value will be returned.
+template <typename T>
+V8_WARN_UNUSED_RESULT static Maybe<T> GetStringOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* name,
+ const char* method_name, const std::vector<const char*>& str_values,
+ const std::vector<T>& enum_values, T default_value) {
+ DCHECK_EQ(str_values.size(), enum_values.size());
+ std::unique_ptr<char[]> cstr;
+ Maybe<bool> found =
+ GetStringOption(isolate, options, name, str_values, method_name, &cstr);
+ MAYBE_RETURN(found, Nothing<T>());
+ if (found.FromJust()) {
+ DCHECK_NOT_NULL(cstr.get());
+ for (size_t i = 0; i < str_values.size(); i++) {
+ if (strcmp(cstr.get(), str_values[i]) == 0) {
+ return Just(enum_values[i]);
+ }
+ }
+ UNREACHABLE();
+ }
+ return Just(default_value);
+}
+
+// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
+// ecma402/#sec-getoption
+//
+// This is specialized for the case when type is boolean.
+//
+// Returns true if the options object has the property and stores the
+// result in value. Returns false if the value is not found. The
+// caller is required to use the fallback value appropriately in this
+// case.
+//
+// method_name is a string denoting the method it is called from; used when
+// printing the error message.
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<bool> GetBoolOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ const char* method_name, bool* result);
+
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<int> GetNumberOption(
+ Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
+ int min, int max, int fallback);
+
+// ecma402/#sec-defaultnumberoption
+V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Maybe<int> DefaultNumberOption(
+ Isolate* isolate, Handle<Object> value, int min, int max, int fallback,
+ Handle<String> property);
+
+} // namespace internal
+} // namespace v8
+#endif // V8_OBJECTS_OPTION_UTILS_H_
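
The templated GetStringOption declared above maps an option string onto a caller-supplied enum by position in two parallel vectors. A standalone sketch of that mapping, with a hypothetical Matcher enum and plain std::string in place of the Isolate/Handle machinery (the real helper throws a RangeError instead of returning nullopt):

    #include <cstddef>
    #include <optional>
    #include <string>
    #include <vector>

    enum class Matcher { kBestFit, kLookup };

    // str_values[i] corresponds to enum_values[i].
    template <typename T>
    std::optional<T> MapStringToEnum(const std::string& value,
                                     const std::vector<const char*>& str_values,
                                     const std::vector<T>& enum_values) {
      for (std::size_t i = 0; i < str_values.size(); i++) {
        if (value == str_values[i]) return enum_values[i];
      }
      return std::nullopt;
    }

    // Usage: MapStringToEnum<Matcher>("lookup", {"best fit", "lookup"},
    //                                 {Matcher::kBestFit, Matcher::kLookup});
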
diff --git a/deps/v8/src/objects/ordered-hash-table.tq b/deps/v8/src/objects/ordered-hash-table.tq
index 82d49b27bc..b37b03e850 100644
--- a/deps/v8/src/objects/ordered-hash-table.tq
+++ b/deps/v8/src/objects/ordered-hash-table.tq
@@ -14,7 +14,6 @@ const kSmallOrderedHashTableNotFound: constexpr int31
const kSmallOrderedHashTableLoadFactor: constexpr int31
generates 'SmallOrderedHashTable<int>::kLoadFactor';
-@noVerifier
@abstract
@doNotGenerateCppClass
extern class SmallOrderedHashTable extends HeapObject
@@ -41,7 +40,7 @@ extern class SmallOrderedHashSet extends SmallOrderedHashTable {
@export
macro AllocateSmallOrderedHashSet(capacity: intptr): SmallOrderedHashSet {
const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
- assert(
+ dcheck(
0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
return new SmallOrderedHashSet{
map: kSmallOrderedHashSetMap,
@@ -80,7 +79,7 @@ extern class SmallOrderedHashMap extends SmallOrderedHashTable {
@export
macro AllocateSmallOrderedHashMap(capacity: intptr): SmallOrderedHashMap {
const hashTableSize = capacity / kSmallOrderedHashTableLoadFactor;
- assert(
+ dcheck(
0 <= hashTableSize && hashTableSize <= kSmallOrderedHashTableMaxCapacity);
return new SmallOrderedHashMap{
map: kSmallOrderedHashMapMap,
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 497498c166..075afbeebc 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -39,9 +39,6 @@ class PromiseFulfillReactionJobTask
: public TorqueGeneratedPromiseFulfillReactionJobTask<
PromiseFulfillReactionJobTask, PromiseReactionJobTask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseFulfillReactionJobTask)
-
STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
TQ_OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask)
@@ -52,9 +49,6 @@ class PromiseRejectReactionJobTask
: public TorqueGeneratedPromiseRejectReactionJobTask<
PromiseRejectReactionJobTask, PromiseReactionJobTask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseRejectReactionJobTask)
-
STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
TQ_OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask)
@@ -65,9 +59,6 @@ class PromiseResolveThenableJobTask
: public TorqueGeneratedPromiseResolveThenableJobTask<
PromiseResolveThenableJobTask, Microtask> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseResolveThenableJobTask)
-
TQ_OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask)
};
@@ -75,9 +66,6 @@ class PromiseResolveThenableJobTask
class PromiseCapability
: public TorqueGeneratedPromiseCapability<PromiseCapability, Struct> {
public:
- // Dispatched behavior.
- DECL_PRINTER(PromiseCapability)
-
TQ_OBJECT_CONSTRUCTORS(PromiseCapability)
};
@@ -103,9 +91,6 @@ class PromiseReaction
public:
enum Type { kFulfill, kReject };
- // Dispatched behavior.
- DECL_PRINTER(PromiseReaction)
-
TQ_OBJECT_CONSTRUCTORS(PromiseReaction)
};
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 52242c87c9..03c2ccd005 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_PROPERTY_ARRAY_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index 38a83f590d..a85bc1e4df 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_PROPERTY_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/property-descriptor-object.tq b/deps/v8/src/objects/property-descriptor-object.tq
index 3f0acdd689..256903e815 100644
--- a/deps/v8/src/objects/property-descriptor-object.tq
+++ b/deps/v8/src/objects/property-descriptor-object.tq
@@ -16,7 +16,6 @@ bitfield struct PropertyDescriptorObjectFlags extends uint31 {
has_set: bool: 1 bit;
}
-@generatePrint
extern class PropertyDescriptorObject extends Struct {
flags: SmiTagged<PropertyDescriptorObjectFlags>;
value: JSAny|TheHole;
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index cde66262cf..e33759f6f7 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -61,7 +61,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
details.representation(),
@@ -73,7 +73,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
if (details.kind() == kData) {
value = handle(descs->GetStrongValue(i), isolate);
} else {
diff --git a/deps/v8/src/objects/property-descriptor.h b/deps/v8/src/objects/property-descriptor.h
index 22fb1d6ff8..8950a9d227 100644
--- a/deps/v8/src/objects/property-descriptor.h
+++ b/deps/v8/src/objects/property-descriptor.h
@@ -122,10 +122,6 @@ class PropertyDescriptor {
Handle<Object> get_;
Handle<Object> set_;
Handle<Object> name_;
-
- // Some compilers (Xcode 5.1, ARM GCC 4.9) insist on having a copy
- // constructor for std::vector<PropertyDescriptor>, so we can't
- // DISALLOW_COPY_AND_ASSIGN(PropertyDescriptor); here.
};
} // namespace internal
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index f32d6ceb89..f356bcd53a 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -83,7 +83,7 @@ enum PropertyKind { kData = 0, kAccessor = 1 };
// Order of modes is significant.
// Must fit in the BitField PropertyDetails::LocationField.
-enum PropertyLocation { kField = 0, kDescriptor = 1 };
+enum class PropertyLocation { kField = 0, kDescriptor = 1 };
// Order of modes is significant.
// Must fit in the BitField PropertyDetails::ConstnessField.
@@ -256,7 +256,8 @@ class PropertyDetails {
// Property details for global dictionary properties.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
PropertyCellType cell_type, int dictionary_index = 0) {
- value_ = KindField::encode(kind) | LocationField::encode(kField) |
+ value_ = KindField::encode(kind) |
+ LocationField::encode(PropertyLocation::kField) |
AttributesField::encode(attributes) |
// We track PropertyCell constness via PropertyCellTypeField,
// so we set ConstnessField to kMutable to simplify DCHECKs related
@@ -269,7 +270,8 @@ class PropertyDetails {
// Property details for dictionary mode properties/elements.
PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
PropertyConstness constness, int dictionary_index = 0) {
- value_ = KindField::encode(kind) | LocationField::encode(kField) |
+ value_ = KindField::encode(kind) |
+ LocationField::encode(PropertyLocation::kField) |
AttributesField::encode(attributes) |
ConstnessField::encode(constness) |
DictionaryStorageField::encode(dictionary_index) |
@@ -499,7 +501,7 @@ class PropertyDetails {
// kField location is more general than kDescriptor, kDescriptor generalizes
// only to itself.
inline bool IsGeneralizableTo(PropertyLocation a, PropertyLocation b) {
- return b == kField || a == kDescriptor;
+ return b == PropertyLocation::kField || a == PropertyLocation::kDescriptor;
}
// PropertyConstness::kMutable constness is more general than
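
Turning PropertyLocation into an enum class is what drives the mechanical kField -> PropertyLocation::kField renames throughout this patch: scoped enumerators are not injected into the enclosing namespace and do not convert implicitly to int. A short illustration of the difference (names are hypothetical):

    enum UnscopedLocation { kUnscopedField = 0, kUnscopedDescriptor = 1 };
    enum class ScopedLocation { kField = 0, kDescriptor = 1 };

    int a = kUnscopedField;             // ok: unscoped enumerators leak and convert
    // int b = kField;                  // error: not visible without qualification
    // int c = ScopedLocation::kField;  // error: no implicit conversion to int
    auto d = ScopedLocation::kField;    // must be written with the enum name
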
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
index 014b41ff38..4cc29c70ae 100644
--- a/deps/v8/src/objects/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -89,8 +89,8 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Representation representation,
const MaybeObjectHandle& wrapped_field_type) {
DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeak());
- PropertyDetails details(kData, attributes, kField, constness, representation,
- field_index);
+ PropertyDetails details(kData, attributes, PropertyLocation::kField,
+ constness, representation, field_index);
return Descriptor(key, wrapped_field_type, details);
}
@@ -98,7 +98,7 @@ Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
PtrComprCageBase cage_base = GetPtrComprCageBase(*key);
return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
- kDescriptor, PropertyConstness::kConst,
+ PropertyLocation::kDescriptor, PropertyConstness::kConst,
value->OptimalRepresentation(cage_base), 0);
}
@@ -114,7 +114,7 @@ Descriptor Descriptor::AccessorConstant(Handle<Name> key,
Handle<Object> foreign,
PropertyAttributes attributes) {
return Descriptor(key, MaybeObjectHandle(foreign), kAccessor, attributes,
- kDescriptor, PropertyConstness::kConst,
+ PropertyLocation::kDescriptor, PropertyConstness::kConst,
Representation::Tagged(), 0);
}
@@ -134,7 +134,7 @@ void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
os << "(";
if (constness() == PropertyConstness::kConst) os << "const ";
os << (kind() == kData ? "data" : "accessor");
- if (location() == kField) {
+ if (location() == PropertyLocation::kField) {
os << " field";
if (mode & kPrintFieldIndex) {
os << " " << field_index();
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 0d6f76fccf..3ce08262b1 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -8,7 +8,6 @@
#include "src/base/compiler-specific.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 1b8c56386f..5ab324dc95 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -244,8 +244,6 @@ SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
return kNeedsBinaryCoverage;
}
- if (optimization_disabled()) return kHasOptimizationDisabled;
-
// Built-in functions are handled by the JSCallReducer.
if (HasBuiltinId()) return kIsBuiltin;
@@ -266,6 +264,8 @@ SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
if (HasBreakInfo()) return kMayContainBreakPoints;
+ if (optimization_disabled()) return kHasOptimizationDisabled;
+
return kIsInlineable;
}
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 598ccfd883..52678a8724 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -22,7 +22,6 @@
#include "src/roots/roots.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
#include "torque-generated/bit-fields.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -147,8 +146,6 @@ class InterpreterData
public:
DECL_ACCESSORS(interpreter_trampoline, Code)
- DECL_PRINTER(InterpreterData)
-
private:
DECL_ACCESSORS(raw_interpreter_trampoline, CodeT)
@@ -533,17 +530,19 @@ class SharedFunctionInfo
inline bool ShouldFlushCode(base::EnumSet<CodeFlushMode> code_flush_mode);
enum Inlineability {
- kIsInlineable,
// Different reasons for not being inlineable:
kHasNoScript,
kNeedsBinaryCoverage,
- kHasOptimizationDisabled,
kIsBuiltin,
kIsNotUserCode,
kHasNoBytecode,
kExceedsBytecodeLimit,
kMayContainBreakPoints,
+ kHasOptimizationDisabled,
+ // Actually inlineable!
+ kIsInlineable,
};
+ // Returns the first value that applies (see enum definition for the order).
template <typename IsolateT>
Inlineability GetInlineability(IsolateT* isolate, bool is_turboprop) const;
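
The reordering of Inlineability above matters because GetInlineability reports the first disqualifying reason it encounters, so moving kHasOptimizationDisabled after kMayContainBreakPoints (and kIsInlineable to the very end) changes which reason wins when several apply. A toy sketch of that first-match pattern, with made-up predicates:

    enum class Reason {
      kHasNoScript,
      kMayContainBreakPoints,
      kHasOptimizationDisabled,
      kIsInlineable,  // only reached when no reason above applied
    };

    struct Candidate {
      bool has_script;
      bool has_break_points;
      bool optimization_disabled;
    };

    Reason GetInlineabilitySketch(const Candidate& c) {
      if (!c.has_script) return Reason::kHasNoScript;
      if (c.has_break_points) return Reason::kMayContainBreakPoints;
      // Checked last among the failure reasons, mirroring the new enum order.
      if (c.optimization_disabled) return Reason::kHasOptimizationDisabled;
      return Reason::kIsInlineable;
    }
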
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index 6f2a3cd0f7..bb5bd5d796 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -283,7 +283,6 @@ class SourceTextModuleInfoEntry
: public TorqueGeneratedSourceTextModuleInfoEntry<SourceTextModuleInfoEntry,
Struct> {
public:
- DECL_PRINTER(SourceTextModuleInfoEntry)
DECL_VERIFIER(SourceTextModuleInfoEntry)
template <typename IsolateT>
diff --git a/deps/v8/src/objects/source-text-module.tq b/deps/v8/src/objects/source-text-module.tq
index d378d5a862..c663c6906a 100644
--- a/deps/v8/src/objects/source-text-module.tq
+++ b/deps/v8/src/objects/source-text-module.tq
@@ -47,7 +47,6 @@ extern class SourceTextModule extends Module {
flags: SmiTagged<SourceTextModuleFlags>;
}
-@generatePrint
extern class ModuleRequest extends Struct {
specifier: String;
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 7ccdd6d955..71357816d7 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -292,7 +292,7 @@ PrimitiveHeapObject InferMethodNameFromFastObject(Isolate* isolate,
auto details = descriptors.GetDetails(i);
if (details.IsDontEnum()) continue;
Object value;
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
auto field_index = FieldIndex::ForPropertyIndex(
map, details.field_index(), details.representation());
if (field_index.is_double()) continue;
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index afd45819b9..ce23de26d4 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -44,7 +44,6 @@ class StackFrameInfo
DECL_ACCESSORS(code_object, HeapObject)
// Dispatched behavior.
- DECL_PRINTER(StackFrameInfo)
DECL_VERIFIER(StackFrameInfo)
// Used to signal that the requested field is unknown.
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index ba2d463047..33f34c6bcd 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -119,6 +119,12 @@ StringShape::StringShape(const String str)
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
+StringShape::StringShape(const String str, PtrComprCageBase cage_base)
+ : type_(str.map(cage_base, kAcquireLoad).instance_type()) {
+ set_valid();
+ DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
+}
+
StringShape::StringShape(Map map) : type_(map.instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
@@ -506,12 +512,14 @@ bool String::IsEqualToImpl(
data, len);
case kExternalStringTag | kOneByteStringTag:
return CompareCharsEqual(
- ExternalOneByteString::cast(string).GetChars() + slice_offset, data,
- len);
+ ExternalOneByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
+ data, len);
case kExternalStringTag | kTwoByteStringTag:
return CompareCharsEqual(
- ExternalTwoByteString::cast(string).GetChars() + slice_offset, data,
- len);
+ ExternalTwoByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
+ data, len);
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag: {
@@ -576,19 +584,20 @@ bool String::IsOneByteEqualTo(base::Vector<const char> str) {
}
template <typename Char>
-const Char* String::GetChars(const DisallowGarbageCollection& no_gc) const {
+const Char* String::GetChars(PtrComprCageBase cage_base,
+ const DisallowGarbageCollection& no_gc) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
- return StringShape(*this).IsExternal()
- ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ return StringShape(*this, cage_base).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars(cage_base)
: CharTraits<Char>::String::cast(*this).GetChars(no_gc);
}
template <typename Char>
const Char* String::GetChars(
- const DisallowGarbageCollection& no_gc,
+ PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc,
const SharedStringAccessGuardIfNeeded& access_guard) const {
- return StringShape(*this).IsExternal()
- ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ return StringShape(*this, cage_base).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars(cage_base)
: CharTraits<Char>::String::cast(*this).GetChars(no_gc,
access_guard);
}
@@ -617,45 +626,53 @@ Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
return string;
}
+uint16_t String::Get(int index) const {
+ DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
+ return GetImpl(index, GetPtrComprCageBase(*this),
+ SharedStringAccessGuardIfNeeded::NotNeeded());
+}
+
uint16_t String::Get(int index, Isolate* isolate) const {
SharedStringAccessGuardIfNeeded scope(isolate);
- return GetImpl(index, scope);
+ return GetImpl(index, isolate, scope);
}
uint16_t String::Get(int index, LocalIsolate* local_isolate) const {
SharedStringAccessGuardIfNeeded scope(local_isolate);
- return GetImpl(index, scope);
+ return GetImpl(index, local_isolate, scope);
}
uint16_t String::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
- return GetImpl(index, access_guard);
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
+ return GetImpl(index, cage_base, access_guard);
}
uint16_t String::GetImpl(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
DCHECK(index >= 0 && index < length());
class StringGetDispatcher : public AllStatic {
public:
#define DEFINE_METHOD(Type) \
static inline uint16_t Handle##Type( \
- Type str, int index, \
+ Type str, int index, PtrComprCageBase cage_base, \
const SharedStringAccessGuardIfNeeded& access_guard) { \
- return str.Get(index, access_guard); \
+ return str.Get(index, cage_base, access_guard); \
}
STRING_CLASS_TYPES(DEFINE_METHOD)
#undef DEFINE_METHOD
static inline uint16_t HandleInvalidString(
- String str, int index,
+ String str, int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) {
UNREACHABLE();
}
};
return StringShape(*this)
- .DispatchToSpecificType<StringGetDispatcher, uint16_t>(*this, index,
- access_guard);
+ .DispatchToSpecificType<StringGetDispatcher, uint16_t>(
+ *this, index, cage_base, access_guard);
}
void String::Set(int index, uint16_t value) {
@@ -667,9 +684,11 @@ void String::Set(int index, uint16_t value) {
: SeqTwoByteString::cast(*this).SeqTwoByteStringSet(index, value);
}
-bool String::IsFlat() const {
- if (!StringShape(*this).IsCons()) return true;
- return ConsString::cast(*this).second().length() == 0;
+bool String::IsFlat() const { return IsFlat(GetPtrComprCageBase(*this)); }
+
+bool String::IsFlat(PtrComprCageBase cage_base) const {
+ if (!StringShape(*this, cage_base).IsCons()) return true;
+ return ConsString::cast(*this).second(cage_base).length() == 0;
}
String String::GetUnderlying() const {
@@ -701,9 +720,10 @@ ConsString String::VisitFlat(
int slice_offset = offset;
const int length = string.length();
DCHECK(offset <= length);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(string);
while (true) {
- int32_t type = string.map().instance_type();
- switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
+ int32_t tag = StringShape(string, cage_base).full_representation_tag();
+ switch (tag) {
case kSeqStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
SeqOneByteString::cast(string).GetChars(no_gc, access_guard) +
@@ -720,13 +740,15 @@ ConsString String::VisitFlat(
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- ExternalOneByteString::cast(string).GetChars() + slice_offset,
+ ExternalOneByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
length - offset);
return ConsString();
case kExternalStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- ExternalTwoByteString::cast(string).GetChars() + slice_offset,
+ ExternalTwoByteString::cast(string).GetChars(cage_base) +
+ slice_offset,
length - offset);
return ConsString();
@@ -734,7 +756,7 @@ ConsString String::VisitFlat(
case kSlicedStringTag | kTwoByteStringTag: {
SlicedString slicedString = SlicedString::cast(string);
slice_offset += slicedString.offset();
- string = slicedString.parent();
+ string = slicedString.parent(cage_base);
continue;
}
@@ -744,7 +766,7 @@ ConsString String::VisitFlat(
case kThinStringTag | kOneByteStringTag:
case kThinStringTag | kTwoByteStringTag:
- string = ThinString::cast(string).actual();
+ string = ThinString::cast(string).actual(cage_base);
continue;
default:
@@ -771,11 +793,13 @@ inline base::Vector<const base::uc16> String::GetCharVector(
uint8_t SeqOneByteString::Get(int index) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
- return Get(index, SharedStringAccessGuardIfNeeded::NotNeeded());
+ return Get(index, GetPtrComprCageBase(*this),
+ SharedStringAccessGuardIfNeeded::NotNeeded());
}
uint8_t SeqOneByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
return ReadField<byte>(kHeaderSize + index * kCharSize);
@@ -825,7 +849,8 @@ base::uc16* SeqTwoByteString::GetChars(
}
uint16_t SeqTwoByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
return ReadField<uint16_t>(kHeaderSize + index * kShortSize);
@@ -929,11 +954,13 @@ DEF_GETTER(ExternalOneByteString, mutable_resource,
void ExternalOneByteString::update_data_cache(Isolate* isolate) {
DisallowGarbageCollection no_gc;
if (is_uncached()) {
- if (resource()->IsCacheable()) mutable_resource()->UpdateDataCache();
+ if (resource(isolate)->IsCacheable())
+ mutable_resource(isolate)->UpdateDataCache();
} else {
- WriteExternalPointerField(kResourceDataOffset, isolate,
- reinterpret_cast<Address>(resource()->data()),
- kExternalStringResourceDataTag);
+ WriteExternalPointerField(
+ kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource(isolate)->data()),
+ kExternalStringResourceDataTag);
}
}
@@ -954,13 +981,15 @@ void ExternalOneByteString::set_resource(
if (resource != nullptr) update_data_cache(isolate);
}
-const uint8_t* ExternalOneByteString::GetChars() const {
+const uint8_t* ExternalOneByteString::GetChars(
+ PtrComprCageBase cage_base) const {
DisallowGarbageCollection no_gc;
+ auto res = resource(cage_base);
if (is_uncached()) {
- if (resource()->IsCacheable()) {
+ if (res->IsCacheable()) {
// TODO(solanes): Teach TurboFan/CSA to not bailout to the runtime to
// avoid this call.
- return reinterpret_cast<const uint8_t*>(resource()->cached_data());
+ return reinterpret_cast<const uint8_t*>(res->cached_data());
}
#if DEBUG
// Check that this method is called only from the main thread if we have an
@@ -973,14 +1002,15 @@ const uint8_t* ExternalOneByteString::GetChars() const {
#endif
}
- return reinterpret_cast<const uint8_t*>(resource()->data());
+ return reinterpret_cast<const uint8_t*>(res->data());
}
uint8_t ExternalOneByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
- return GetChars()[index];
+ return GetChars(cage_base)[index];
}
DEF_GETTER(ExternalTwoByteString, resource,
@@ -996,11 +1026,13 @@ DEF_GETTER(ExternalTwoByteString, mutable_resource,
void ExternalTwoByteString::update_data_cache(Isolate* isolate) {
DisallowGarbageCollection no_gc;
if (is_uncached()) {
- if (resource()->IsCacheable()) mutable_resource()->UpdateDataCache();
+ if (resource(isolate)->IsCacheable())
+ mutable_resource(isolate)->UpdateDataCache();
} else {
- WriteExternalPointerField(kResourceDataOffset, isolate,
- reinterpret_cast<Address>(resource()->data()),
- kExternalStringResourceDataTag);
+ WriteExternalPointerField(
+ kResourceDataOffset, isolate,
+ reinterpret_cast<Address>(resource(isolate)->data()),
+ kExternalStringResourceDataTag);
}
}
@@ -1021,13 +1053,15 @@ void ExternalTwoByteString::set_resource(
if (resource != nullptr) update_data_cache(isolate);
}
-const uint16_t* ExternalTwoByteString::GetChars() const {
+const uint16_t* ExternalTwoByteString::GetChars(
+ PtrComprCageBase cage_base) const {
DisallowGarbageCollection no_gc;
+ auto res = resource(cage_base);
if (is_uncached()) {
- if (resource()->IsCacheable()) {
+ if (res->IsCacheable()) {
// TODO(solanes): Teach TurboFan/CSA to not bailout to the runtime to
// avoid this call.
- return resource()->cached_data();
+ return res->cached_data();
}
#if DEBUG
// Check that this method is called only from the main thread if we have an
@@ -1040,19 +1074,20 @@ const uint16_t* ExternalTwoByteString::GetChars() const {
#endif
}
- return resource()->data();
+ return res->data();
}
uint16_t ExternalTwoByteString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
USE(access_guard);
DCHECK(index >= 0 && index < length());
- return GetChars()[index];
+ return GetChars(cage_base)[index];
}
const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
unsigned start) {
- return GetChars() + start;
+ return GetChars(GetPtrComprCageBase(*this)) + start;
}
int ConsStringIterator::OffsetForDepth(int depth) { return depth & kDepthMask; }
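The string-inl.h changes above all follow one pattern: accessors that may decompress tagged fields now take an explicit PtrComprCageBase so the base is computed once per operation instead of once per call. A caller-side sketch under that assumption, using only entry points that appear in this patch (the function itself is illustrative, not V8 code):

// Sum the character values of a string on the main thread, computing the
// pointer-compression cage base once and threading it through Get().
uint32_t SumChars(String str) {
  PtrComprCageBase cage_base = GetPtrComprCageBase(str);
  uint32_t sum = 0;
  for (int i = 0; i < str.length(); i++) {
    sum += str.Get(i, cage_base, SharedStringAccessGuardIfNeeded::NotNeeded());
  }
  return sum;
}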
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index cff50bea79..d480901095 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -574,13 +574,14 @@ Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate,
std::unique_ptr<Char[]> buffer;
const Char* chars;
- if (source.IsConsString()) {
- DCHECK(!source.IsFlat());
+ SharedStringAccessGuardIfNeeded access_guard(isolate);
+ if (source.IsConsString(isolate)) {
+ DCHECK(!source.IsFlat(isolate));
buffer.reset(new Char[length]);
- String::WriteToFlat(source, buffer.get(), 0, length);
+ String::WriteToFlat(source, buffer.get(), 0, length, isolate, access_guard);
chars = buffer.get();
} else {
- chars = source.GetChars<Char>(no_gc) + start;
+ chars = source.GetChars<Char>(isolate, no_gc, access_guard) + start;
}
// TODO(verwaest): Internalize to one-byte when possible.
SequentialStringKey<Char> key(base::Vector<const Char>(chars, length), seed);
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index 4b18ee3d05..7e951b428d 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -15,6 +15,7 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/numbers/conversions.h"
+#include "src/objects/instance-type.h"
#include "src/objects/map.h"
#include "src/objects/oddball.h"
#include "src/objects/string-comparator.h"
@@ -546,29 +547,30 @@ String::FlatContent String::GetFlatContent(
}
#endif
USE(no_gc);
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
int length = this->length();
- StringShape shape(*this);
+ StringShape shape(*this, cage_base);
String string = *this;
int offset = 0;
if (shape.representation_tag() == kConsStringTag) {
ConsString cons = ConsString::cast(string);
- if (cons.second().length() != 0) {
+ if (cons.second(cage_base).length() != 0) {
return FlatContent(no_gc);
}
- string = cons.first();
- shape = StringShape(string);
+ string = cons.first(cage_base);
+ shape = StringShape(string, cage_base);
} else if (shape.representation_tag() == kSlicedStringTag) {
SlicedString slice = SlicedString::cast(string);
offset = slice.offset();
- string = slice.parent();
- shape = StringShape(string);
+ string = slice.parent(cage_base);
+ shape = StringShape(string, cage_base);
DCHECK(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
if (shape.representation_tag() == kThinStringTag) {
ThinString thin = ThinString::cast(string);
- string = thin.actual();
- shape = StringShape(string);
+ string = thin.actual(cage_base);
+ shape = StringShape(string, cage_base);
DCHECK(!shape.IsCons());
DCHECK(!shape.IsSliced());
}
@@ -577,7 +579,7 @@ String::FlatContent String::GetFlatContent(
if (shape.representation_tag() == kSeqStringTag) {
start = SeqOneByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalOneByteString::cast(string).GetChars();
+ start = ExternalOneByteString::cast(string).GetChars(cage_base);
}
return FlatContent(start + offset, length, no_gc);
} else {
@@ -586,7 +588,7 @@ String::FlatContent String::GetFlatContent(
if (shape.representation_tag() == kSeqStringTag) {
start = SeqTwoByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalTwoByteString::cast(string).GetChars();
+ start = ExternalTwoByteString::cast(string).GetChars(cage_base);
}
return FlatContent(start + offset, length, no_gc);
}
@@ -645,104 +647,113 @@ std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
// static
template <typename sinkchar>
-void String::WriteToFlat(String source, sinkchar* sink, int from, int to) {
+void String::WriteToFlat(String source, sinkchar* sink, int start, int length) {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(source));
- return WriteToFlat(source, sink, from, to,
+ return WriteToFlat(source, sink, start, length, GetPtrComprCageBase(source),
SharedStringAccessGuardIfNeeded::NotNeeded());
}
// static
template <typename sinkchar>
-void String::WriteToFlat(String source, sinkchar* sink, int from, int to,
+void String::WriteToFlat(String source, sinkchar* sink, int start, int length,
+ PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) {
DisallowGarbageCollection no_gc;
- while (from < to) {
- DCHECK_LE(0, from);
- DCHECK_LE(to, source.length());
- switch (StringShape(source).full_representation_tag()) {
- case kOneByteStringTag | kExternalStringTag: {
- CopyChars(sink, ExternalOneByteString::cast(source).GetChars() + from,
- to - from);
- return;
- }
- case kTwoByteStringTag | kExternalStringTag: {
- const base::uc16* data = ExternalTwoByteString::cast(source).GetChars();
- CopyChars(sink, data + from, to - from);
- return;
- }
- case kOneByteStringTag | kSeqStringTag: {
+ if (length == 0) return;
+ while (true) {
+ DCHECK_LT(0, length);
+ DCHECK_LE(0, start);
+ DCHECK_LE(length, source.length());
+ switch (StringShape(source, cage_base).full_representation_tag()) {
+ case kOneByteStringTag | kExternalStringTag:
CopyChars(
sink,
- SeqOneByteString::cast(source).GetChars(no_gc, access_guard) + from,
- to - from);
+ ExternalOneByteString::cast(source).GetChars(cage_base) + start,
+ length);
return;
- }
- case kTwoByteStringTag | kSeqStringTag: {
+ case kTwoByteStringTag | kExternalStringTag:
CopyChars(
sink,
- SeqTwoByteString::cast(source).GetChars(no_gc, access_guard) + from,
- to - from);
+ ExternalTwoByteString::cast(source).GetChars(cage_base) + start,
+ length);
+ return;
+ case kOneByteStringTag | kSeqStringTag:
+ CopyChars(sink,
+ SeqOneByteString::cast(source).GetChars(no_gc, access_guard) +
+ start,
+ length);
+ return;
+ case kTwoByteStringTag | kSeqStringTag:
+ CopyChars(sink,
+ SeqTwoByteString::cast(source).GetChars(no_gc, access_guard) +
+ start,
+ length);
return;
- }
case kOneByteStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString cons_string = ConsString::cast(source);
- String first = cons_string.first();
+ String first = cons_string.first(cage_base);
int boundary = first.length();
- if (to - boundary >= boundary - from) {
+ int first_length = boundary - start;
+ int second_length = start + length - boundary;
+ if (second_length >= first_length) {
// Right hand side is longer. Recurse over left.
- if (from < boundary) {
- WriteToFlat(first, sink, from, boundary, access_guard);
- if (from == 0 && cons_string.second() == first) {
+ if (first_length > 0) {
+ WriteToFlat(first, sink, start, first_length, cage_base,
+ access_guard);
+ if (start == 0 && cons_string.second(cage_base) == first) {
CopyChars(sink + boundary, sink, boundary);
return;
}
- sink += boundary - from;
- from = 0;
+ sink += boundary - start;
+ start = 0;
+ length -= first_length;
} else {
- from -= boundary;
+ start -= boundary;
}
- to -= boundary;
- source = cons_string.second();
+ source = cons_string.second(cage_base);
} else {
// Left hand side is longer. Recurse over right.
- if (to > boundary) {
- String second = cons_string.second();
+ if (second_length > 0) {
+ String second = cons_string.second(cage_base);
// When repeatedly appending to a string, we get a cons string that
// is unbalanced to the left, a list, essentially. We inline the
// common case of sequential one-byte right child.
- if (to - boundary == 1) {
- sink[boundary - from] = static_cast<sinkchar>(second.Get(0));
- } else if (second.IsSeqOneByteString()) {
+ if (second_length == 1) {
+ sink[boundary - start] =
+ static_cast<sinkchar>(second.Get(0, cage_base, access_guard));
+ } else if (second.IsSeqOneByteString(cage_base)) {
CopyChars(
- sink + boundary - from,
+ sink + boundary - start,
SeqOneByteString::cast(second).GetChars(no_gc, access_guard),
- to - boundary);
+ second_length);
} else {
- WriteToFlat(second, sink + boundary - from, 0, to - boundary,
- access_guard);
+ WriteToFlat(second, sink + boundary - start, 0, second_length,
+ cage_base, access_guard);
}
- to = boundary;
+ length -= second_length;
}
source = first;
}
- break;
+ if (length == 0) return;
+ continue;
}
case kOneByteStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString slice = SlicedString::cast(source);
unsigned offset = slice.offset();
- WriteToFlat(slice.parent(), sink, from + offset, to + offset,
- access_guard);
- return;
+ source = slice.parent(cage_base);
+ start += offset;
+ continue;
}
case kOneByteStringTag | kThinStringTag:
case kTwoByteStringTag | kThinStringTag:
- source = ThinString::cast(source).actual();
- break;
+ source = ThinString::cast(source).actual(cage_base);
+ continue;
}
+ UNREACHABLE();
}
- DCHECK_EQ(from, to);
+ UNREACHABLE();
}
template <typename SourceChar>
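Besides threading the cage base through, this hunk reworks WriteToFlat itself: the exclusive-end pair (from, to) becomes (start, length), the cons/sliced/thin cases now iterate instead of recursing where they can, and falling out of the switch hits UNREACHABLE(). An illustrative call-site comparison (the concrete values are made up, not taken from the patch):

// Copy characters [2, 7) of |src| into |buf|.
//   before this change: String::WriteToFlat(src, buf, 2, 7);  // (from, to)
//   after this change:  String::WriteToFlat(src, buf, 2, 5);  // (start, length)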
@@ -819,12 +830,15 @@ bool String::SlowEquals(
if (len != other.length()) return false;
if (len == 0) return true;
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+
// Fast check: if at least one ThinString is involved, dereference it/them
// and restart.
- if (this->IsThinString() || other.IsThinString()) {
- if (other.IsThinString()) other = ThinString::cast(other).actual();
- if (this->IsThinString()) {
- return ThinString::cast(*this).actual().Equals(other);
+ if (this->IsThinString(cage_base) || other.IsThinString(cage_base)) {
+ if (other.IsThinString(cage_base))
+ other = ThinString::cast(other).actual(cage_base);
+ if (this->IsThinString(cage_base)) {
+ return ThinString::cast(*this).actual(cage_base).Equals(other);
} else {
return this->Equals(other);
}
@@ -852,7 +866,9 @@ bool String::SlowEquals(
// We know the strings are both non-empty. Compare the first chars
// before we try to flatten the strings.
- if (this->Get(0, access_guard) != other.Get(0, access_guard)) return false;
+ if (this->Get(0, cage_base, access_guard) !=
+ other.Get(0, cage_base, access_guard))
+ return false;
if (IsSeqOneByteString() && other.IsSeqOneByteString()) {
const uint8_t* str1 =
@@ -1348,6 +1364,7 @@ namespace {
template <typename Char>
uint32_t HashString(String string, size_t start, int length, uint64_t seed,
+ PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) {
DisallowGarbageCollection no_gc;
@@ -1358,14 +1375,15 @@ uint32_t HashString(String string, size_t start, int length, uint64_t seed,
std::unique_ptr<Char[]> buffer;
const Char* chars;
- if (string.IsConsString()) {
+ if (string.IsConsString(cage_base)) {
DCHECK_EQ(0, start);
DCHECK(!string.IsFlat());
buffer.reset(new Char[length]);
- String::WriteToFlat(string, buffer.get(), 0, length, access_guard);
+ String::WriteToFlat(string, buffer.get(), 0, length, cage_base,
+ access_guard);
chars = buffer.get();
} else {
- chars = string.GetChars<Char>(no_gc, access_guard) + start;
+ chars = string.GetChars<Char>(cage_base, no_gc, access_guard) + start;
}
return StringHasher::HashSequentialString<Char>(chars, length, seed);
@@ -1387,25 +1405,32 @@ uint32_t String::ComputeAndSetHash(
uint64_t seed = HashSeed(GetReadOnlyRoots());
size_t start = 0;
String string = *this;
- if (string.IsSlicedString()) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(string);
+ StringShape shape(string, cage_base);
+ if (shape.IsSliced()) {
SlicedString sliced = SlicedString::cast(string);
start = sliced.offset();
- string = sliced.parent();
+ string = sliced.parent(cage_base);
+ shape = StringShape(string, cage_base);
}
- if (string.IsConsString() && string.IsFlat()) {
- string = ConsString::cast(string).first();
+ if (shape.IsCons() && string.IsFlat(cage_base)) {
+ string = ConsString::cast(string).first(cage_base);
+ shape = StringShape(string, cage_base);
}
- if (string.IsThinString()) {
- string = ThinString::cast(string).actual();
+ if (shape.IsThin()) {
+ string = ThinString::cast(string).actual(cage_base);
+ shape = StringShape(string, cage_base);
if (length() == string.length()) {
set_raw_hash_field(string.raw_hash_field());
return hash();
}
}
uint32_t raw_hash_field =
- string.IsOneByteRepresentation()
- ? HashString<uint8_t>(string, start, length(), seed, access_guard)
- : HashString<uint16_t>(string, start, length(), seed, access_guard);
+ shape.encoding_tag() == kOneByteStringTag
+ ? HashString<uint8_t>(string, start, length(), seed, cage_base,
+ access_guard)
+ : HashString<uint16_t>(string, start, length(), seed, cage_base,
+ access_guard);
set_raw_hash_field(raw_hash_field);
// Check the hash code is there.
@@ -1453,6 +1478,13 @@ void String::PrintOn(FILE* file) {
}
}
+void String::PrintOn(std::ostream& ostream) {
+ int length = this->length();
+ for (int i = 0; i < length; i++) {
+ ostream.put(Get(i));
+ }
+}
+
Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
if (new_length == 0) return string->GetReadOnlyRoots().empty_string_handle();
@@ -1502,29 +1534,30 @@ void SeqTwoByteString::clear_padding() {
}
uint16_t ConsString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
DCHECK(index >= 0 && index < this->length());
// Check for a flattened cons string
- if (second().length() == 0) {
- String left = first();
+ if (second(cage_base).length() == 0) {
+ String left = first(cage_base);
return left.Get(index);
}
String string = String::cast(*this);
while (true) {
- if (StringShape(string).IsCons()) {
+ if (StringShape(string, cage_base).IsCons()) {
ConsString cons_string = ConsString::cast(string);
String left = cons_string.first();
if (left.length() > index) {
string = left;
} else {
index -= left.length();
- string = cons_string.second();
+ string = cons_string.second(cage_base);
}
} else {
- return string.Get(index, access_guard);
+ return string.Get(index, cage_base, access_guard);
}
}
@@ -1532,13 +1565,15 @@ uint16_t ConsString::Get(
}
uint16_t ThinString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
- return actual().Get(index, access_guard);
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
+ return actual(cage_base).Get(index, cage_base, access_guard);
}
uint16_t SlicedString::Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const {
- return parent().Get(offset() + index, access_guard);
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const {
+ return parent(cage_base).Get(offset() + index, cage_base, access_guard);
}
int ExternalString::ExternalPayloadSize() const {
@@ -1705,30 +1740,39 @@ const byte* String::AddressOfCharacterAt(
int start_index, const DisallowGarbageCollection& no_gc) {
DCHECK(IsFlat());
String subject = *this;
- if (subject.IsConsString()) {
- subject = ConsString::cast(subject).first();
- } else if (subject.IsSlicedString()) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(subject);
+ StringShape shape(subject, cage_base);
+ if (subject.IsConsString(cage_base)) {
+ subject = ConsString::cast(subject).first(cage_base);
+ shape = StringShape(subject, cage_base);
+ } else if (subject.IsSlicedString(cage_base)) {
start_index += SlicedString::cast(subject).offset();
- subject = SlicedString::cast(subject).parent();
+ subject = SlicedString::cast(subject).parent(cage_base);
+ shape = StringShape(subject, cage_base);
}
- if (subject.IsThinString()) {
- subject = ThinString::cast(subject).actual();
+ if (subject.IsThinString(cage_base)) {
+ subject = ThinString::cast(subject).actual(cage_base);
+ shape = StringShape(subject, cage_base);
}
CHECK_LE(0, start_index);
CHECK_LE(start_index, subject.length());
- if (subject.IsSeqOneByteString()) {
- return reinterpret_cast<const byte*>(
- SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
- } else if (subject.IsSeqTwoByteString()) {
- return reinterpret_cast<const byte*>(
- SeqTwoByteString::cast(subject).GetChars(no_gc) + start_index);
- } else if (subject.IsExternalOneByteString()) {
- return reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(subject).GetChars() + start_index);
- } else {
- DCHECK(subject.IsExternalTwoByteString());
- return reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(subject).GetChars() + start_index);
+ switch (shape.full_representation_tag()) {
+ case kOneByteStringTag | kSeqStringTag:
+ return reinterpret_cast<const byte*>(
+ SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
+ case kTwoByteStringTag | kSeqStringTag:
+ return reinterpret_cast<const byte*>(
+ SeqTwoByteString::cast(subject).GetChars(no_gc) + start_index);
+ case kOneByteStringTag | kExternalStringTag:
+ return reinterpret_cast<const byte*>(
+ ExternalOneByteString::cast(subject).GetChars(cage_base) +
+ start_index);
+ case kTwoByteStringTag | kExternalStringTag:
+ return reinterpret_cast<const byte*>(
+ ExternalTwoByteString::cast(subject).GetChars(cage_base) +
+ start_index);
+ default:
+ UNREACHABLE();
}
}
@@ -1737,10 +1781,10 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
String source, uint8_t* sink, int from, int to);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
- String source, uint16_t* sink, int from, int to,
+ String source, uint16_t* sink, int from, int to, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded&);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::WriteToFlat(
- String source, uint8_t* sink, int from, int to,
+ String source, uint8_t* sink, int from, int to, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded&);
namespace {
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 3bb3ba1d6e..7a0166b7af 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -10,11 +10,11 @@
#include "src/base/bits.h"
#include "src/base/export-template.h"
#include "src/base/strings.h"
+#include "src/common/globals.h"
#include "src/objects/instance-type.h"
#include "src/objects/name.h"
#include "src/objects/smi.h"
#include "src/strings/unicode-decoder.h"
-#include "torque-generated/field-offsets.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -43,6 +43,7 @@ enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
class StringShape {
public:
inline explicit StringShape(const String s);
+ inline explicit StringShape(const String s, PtrComprCageBase cage_base);
inline explicit StringShape(Map s);
inline explicit StringShape(InstanceType t);
inline bool IsSequential() const;
@@ -183,12 +184,13 @@ class String : public TorqueGeneratedString<String, Name> {
// SharedStringAccessGuard is not needed (i.e. on the main thread or on
// read-only strings).
template <typename Char>
- inline const Char* GetChars(const DisallowGarbageCollection& no_gc) const;
+ inline const Char* GetChars(PtrComprCageBase cage_base,
+ const DisallowGarbageCollection& no_gc) const;
// Get chars from sequential or external strings.
template <typename Char>
inline const Char* GetChars(
- const DisallowGarbageCollection& no_gc,
+ PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc,
const SharedStringAccessGuardIfNeeded& access_guard) const;
// Returns the address of the character at an offset into this string.
@@ -220,13 +222,15 @@ class String : public TorqueGeneratedString<String, Name> {
// to this method are not efficient unless the string is flat.
// If it is called from a background thread, the LocalIsolate version should
// be used.
- V8_INLINE uint16_t Get(int index, Isolate* isolate = nullptr) const;
+ V8_INLINE uint16_t Get(int index) const;
+ V8_INLINE uint16_t Get(int index, Isolate* isolate) const;
V8_INLINE uint16_t Get(int index, LocalIsolate* local_isolate) const;
// Method to pass down the access_guard. Useful for recursive calls such as
// ThinStrings where we go String::Get into ThinString::Get into String::Get
// again for the internalized string.
V8_INLINE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// ES6 section 7.1.3.1 ToNumber Applied to the String Type
static Handle<Object> ToNumber(Isolate* isolate, Handle<String> subject);
@@ -403,6 +407,7 @@ class String : public TorqueGeneratedString<String, Name> {
enum TrimMode { kTrim, kTrimStart, kTrimEnd };
V8_EXPORT_PRIVATE void PrintOn(FILE* out);
+ V8_EXPORT_PRIVATE void PrintOn(std::ostream& out);
// For use during stack traces. Performs rudimentary sanity check.
bool LooksValid();
@@ -428,6 +433,7 @@ class String : public TorqueGeneratedString<String, Name> {
DECL_VERIFIER(String)
inline bool IsFlat() const;
+ inline bool IsFlat(PtrComprCageBase cage_base) const;
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
@@ -473,6 +479,7 @@ class String : public TorqueGeneratedString<String, Name> {
static void WriteToFlat(String source, sinkchar* sink, int from, int to);
template <typename sinkchar>
static void WriteToFlat(String source, sinkchar* sink, int from, int to,
+ PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded&);
static inline bool IsAscii(const char* chars, int length) {
@@ -550,7 +557,8 @@ class String : public TorqueGeneratedString<String, Name> {
// Implementation of the Get() public methods. Do not use directly.
V8_INLINE uint16_t
- GetImpl(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ GetImpl(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Implementation of the IsEqualTo() public methods. Do not use directly.
template <EqualityType kEqType, typename Char>
@@ -595,11 +603,13 @@ void String::WriteToFlat(String source, uint8_t* sink, int from, int to);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void String::WriteToFlat(String source, uint16_t* sink, int from, int to);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-void String::WriteToFlat(String source, uint8_t* sink, int from, int to ,
- const SharedStringAccessGuardIfNeeded&);
+void String::WriteToFlat(String source, uint8_t* sink, int from, int to,
+ PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded&);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void String::WriteToFlat(String source, uint16_t* sink, int from, int to,
- const SharedStringAccessGuardIfNeeded&);
+ PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded&);
// clang-format on
class SubStringRange {
@@ -649,7 +659,7 @@ class SeqOneByteString
// defined for convenience and it will check that the access guard is not
// needed.
inline uint8_t Get(int index) const;
- inline uint8_t Get(int index,
+ inline uint8_t Get(int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const;
inline void SeqOneByteStringSet(int index, uint16_t value);
@@ -697,7 +707,8 @@ class SeqTwoByteString
// Dispatched behavior.
inline uint16_t Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
inline void SeqTwoByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
@@ -755,7 +766,8 @@ class ConsString : public TorqueGeneratedConsString<ConsString, String> {
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Minimum length for a cons string.
static const int kMinLength = 13;
@@ -779,7 +791,8 @@ class ThinString : public TorqueGeneratedThinString<ThinString, String> {
DECL_GETTER(unchecked_actual, HeapObject)
V8_EXPORT_PRIVATE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
DECL_VERIFIER(ThinString)
@@ -804,7 +817,8 @@ class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t
- Get(int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ Get(int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// Minimum length for a sliced string.
static const int kMinLength = 13;
@@ -887,10 +901,10 @@ class ExternalOneByteString
// which the pointer cache has to be refreshed.
inline void update_data_cache(Isolate* isolate);
- inline const uint8_t* GetChars() const;
+ inline const uint8_t* GetChars(PtrComprCageBase cage_base) const;
// Dispatched behavior.
- inline uint8_t Get(int index,
+ inline uint8_t Get(int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const;
class BodyDescriptor;
@@ -930,11 +944,12 @@ class ExternalTwoByteString
// which the pointer cache has to be refreshed.
inline void update_data_cache(Isolate* isolate);
- inline const uint16_t* GetChars() const;
+ inline const uint16_t* GetChars(PtrComprCageBase cage_base) const;
// Dispatched behavior.
inline uint16_t Get(
- int index, const SharedStringAccessGuardIfNeeded& access_guard) const;
+ int index, PtrComprCageBase cage_base,
+ const SharedStringAccessGuardIfNeeded& access_guard) const;
// For regexp code.
inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
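The header now splits the old Get(int index, Isolate* isolate = nullptr) into distinct overloads: a plain Get(index) that DCHECKs no shared-string access guard is needed, Isolate*/LocalIsolate* overloads that construct the guard, and the cage_base + guard overload used on recursive/dispatch paths. A small usage sketch (the wrapper functions are assumed, the overloads are the ones declared above):

// Background-thread access: pass the LocalIsolate so the shared-string
// access guard is taken when required.
uint16_t FirstChar(String str, LocalIsolate* local_isolate) {
  return str.Get(0, local_isolate);
}

// Main-thread access where no guard is needed: the plain overload.
uint16_t FirstCharOnMainThread(String str) { return str.Get(0); }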
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index 9ab35d1e00..68c280de3a 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -128,7 +128,7 @@ type DirectString extends String;
macro AllocateNonEmptySeqOneByteString<Iterator: type>(
length: uint32, content: Iterator): SeqOneByteString {
- assert(length != 0 && length <= kStringMaxLength);
+ dcheck(length != 0 && length <= kStringMaxLength);
return new SeqOneByteString{
map: kOneByteStringMap,
raw_hash_field: kNameEmptyHashField,
@@ -139,7 +139,7 @@ macro AllocateNonEmptySeqOneByteString<Iterator: type>(
macro AllocateNonEmptySeqTwoByteString<Iterator: type>(
length: uint32, content: Iterator): SeqTwoByteString {
- assert(length > 0 && length <= kStringMaxLength);
+ dcheck(length > 0 && length <= kStringMaxLength);
return new SeqTwoByteString{
map: kStringMap,
raw_hash_field: kNameEmptyHashField,
@@ -177,8 +177,10 @@ macro AllocateSeqTwoByteString(length: uint32): SeqTwoByteString|EmptyString {
return AllocateSeqTwoByteString(length, UninitializedIterator{});
}
-extern macro StringWriteToFlatOneByte(String, RawPtr<char8>, int32, int32);
-extern macro StringWriteToFlatTwoByte(String, RawPtr<char16>, int32, int32);
+extern macro StringWriteToFlatOneByte(
+ String, RawPtr<char8>, int32, int32): void;
+extern macro StringWriteToFlatTwoByte(
+ String, RawPtr<char16>, int32, int32): void;
// Corresponds to String::SlowFlatten in the C++ runtime.
builtin StringSlowFlatten(cons: ConsString): String {
@@ -222,7 +224,7 @@ macro Flatten(string: String): String {
return Flatten(cons);
}
case (thin: ThinString): {
- assert(!Is<ConsString>(thin.actual));
+ dcheck(!Is<ConsString>(thin.actual));
return thin.actual;
}
case (other: String): {
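The Torque edits here (and in the swiss-name-dictionary files below) rename assert(...) to dcheck(...), which appears to align Torque's naming with the C++ macros: dcheck, like DCHECK, is a debug-only assertion. C++ analogue for reference (the condition is arbitrary):

void CheckIndex(int index, int length) {
  DCHECK(index >= 0 && index < length);  // verified only in debug builds
  CHECK(index >= 0 && index < length);   // verified in release builds as well
}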
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index 2cc51c8544..41a4b2b481 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -69,9 +69,6 @@ class AccessorPair : public TorqueGeneratedAccessorPair<AccessorPair, Struct> {
inline bool Equals(Object getter_value, Object setter_value);
- // Dispatched behavior.
- DECL_PRINTER(AccessorPair)
-
TQ_OBJECT_CONSTRUCTORS(AccessorPair)
};
@@ -79,7 +76,6 @@ class ClassPositions
: public TorqueGeneratedClassPositions<ClassPositions, Struct> {
public:
// Dispatched behavior.
- DECL_PRINTER(ClassPositions)
void BriefPrintDetails(std::ostream& os);
TQ_OBJECT_CONSTRUCTORS(ClassPositions)
diff --git a/deps/v8/src/objects/struct.tq b/deps/v8/src/objects/struct.tq
index ec9782bab0..9c87663fd2 100644
--- a/deps/v8/src/objects/struct.tq
+++ b/deps/v8/src/objects/struct.tq
@@ -3,11 +3,9 @@
// found in the LICENSE file.
@abstract
-@generatePrint
extern class Struct extends HeapObject {
}
-@generatePrint
extern class Tuple2 extends Struct {
value1: Object;
value2: Object;
diff --git a/deps/v8/src/objects/swiss-hash-table-helpers.tq b/deps/v8/src/objects/swiss-hash-table-helpers.tq
index 627fde7297..0d8543d5d1 100644
--- a/deps/v8/src/objects/swiss-hash-table-helpers.tq
+++ b/deps/v8/src/objects/swiss-hash-table-helpers.tq
@@ -31,7 +31,7 @@ extern macro LoadSwissNameDictionaryCtrlTableGroup(intptr): uint64;
// Counterpart to swiss_table::ProbeSequence in C++ implementation.
struct ProbeSequence {
- macro Next() {
+ macro Next(): void {
this.index = this.index + Unsigned(FromConstexpr<int32>(kGroupWidth));
this.offset = (this.offset + this.index) & this.mask;
}
@@ -64,7 +64,7 @@ struct ByteMask {
}
// Counterpart to operator++() in C++ version.
- macro ClearLowestSetBit() {
+ macro ClearLowestSetBit(): void {
this.mask = ClearLowestSetBit<uint64>(this.mask);
}
@@ -83,7 +83,7 @@ struct BitMask {
}
// Counterpart to operator++() in C++ version.
- macro ClearLowestSetBit() {
+ macro ClearLowestSetBit(): void {
this.mask = ClearLowestSetBit<uint32>(this.mask);
}
diff --git a/deps/v8/src/objects/swiss-name-dictionary.tq b/deps/v8/src/objects/swiss-name-dictionary.tq
index 803014448e..c1c1d40616 100644
--- a/deps/v8/src/objects/swiss-name-dictionary.tq
+++ b/deps/v8/src/objects/swiss-name-dictionary.tq
@@ -4,7 +4,6 @@
#include 'src/objects/swiss-name-dictionary.h'
-@noVerifier
@doNotGenerateCppClass
extern class SwissNameDictionary extends HeapObject {
hash: uint32;
@@ -32,13 +31,13 @@ const kNotFoundSentinel:
extern macro LoadSwissNameDictionaryKey(SwissNameDictionary, intptr): Name;
extern macro StoreSwissNameDictionaryKeyAndValue(
- SwissNameDictionary, intptr, Object, Object);
+ SwissNameDictionary, intptr, Object, Object): void;
extern macro SwissNameDictionarySetCtrl(
- SwissNameDictionary, intptr, intptr, uint8);
+ SwissNameDictionary, intptr, intptr, uint8): void;
extern macro StoreSwissNameDictionaryPropertyDetails(
- SwissNameDictionary, intptr, intptr, uint8);
+ SwissNameDictionary, intptr, intptr, uint8): void;
extern macro
SwissNameDictionaryIncreaseElementCountOrBailout(
@@ -46,7 +45,7 @@ SwissNameDictionaryIncreaseElementCountOrBailout(
extern macro
StoreSwissNameDictionaryEnumToEntryMapping(
- SwissNameDictionary, intptr, intptr, int32);
+ SwissNameDictionary, intptr, intptr, int32): void;
extern macro
SwissNameDictionaryUpdateCountsForDeletion(ByteArray, intptr): uint32;
@@ -70,10 +69,10 @@ macro SwissNameDictionaryCapacityFor(atLeastSpaceFor: intptr): intptr {
} else if (atLeastSpaceFor < kSwissNameDictionaryInitialCapacity) {
return 4;
} else if (FromConstexpr<bool>(kGroupWidth == 16)) {
- assert(atLeastSpaceFor == 4);
+ dcheck(atLeastSpaceFor == 4);
return 4;
} else if (FromConstexpr<bool>(kGroupWidth == 8)) {
- assert(atLeastSpaceFor == 4);
+ dcheck(atLeastSpaceFor == 4);
return 8;
}
}
@@ -85,7 +84,7 @@ macro SwissNameDictionaryCapacityFor(atLeastSpaceFor: intptr): intptr {
// Counterpart for SwissNameDictionary::MaxUsableCapacity in C++.
@export
macro SwissNameDictionaryMaxUsableCapacity(capacity: intptr): intptr {
- assert(capacity == 0 || capacity >= kSwissNameDictionaryInitialCapacity);
+ dcheck(capacity == 0 || capacity >= kSwissNameDictionaryInitialCapacity);
if (FromConstexpr<bool>(kGroupWidth == 8) && capacity == 4) {
// If the group size is 16 we can fully utilize capacity 4: There will be
// enough kEmpty entries in the ctrl table.
@@ -147,7 +146,7 @@ macro SwissNameDictionaryCtrlTableStartOffsetMT(capacity: intptr): intptr {
macro Probe(hash: uint32, mask: uint32): ProbeSequence {
// Mask must be a power of 2 minus 1.
- assert(((mask + 1) & mask) == 0);
+ dcheck(((mask + 1) & mask) == 0);
return ProbeSequence{mask: mask, offset: H1(hash) & mask, index: 0};
}
@@ -215,8 +214,7 @@ macro FindFirstEmpty<GroupLoader: type>(
macro Add<GroupLoader: type>(
table: SwissNameDictionary, key: Name, value: Object,
- propertyDetails: uint8)
- labels Bailout {
+ propertyDetails: uint8): void labels Bailout {
const capacity: intptr = Convert<intptr>(table.capacity);
const maxUsable: uint32 =
Unsigned(Convert<int32>(SwissNameDictionaryMaxUsableCapacity(capacity)));
@@ -250,9 +248,8 @@ macro Add<GroupLoader: type>(
}
@export
-macro SwissNameDictionaryDelete(table: SwissNameDictionary, entry: intptr)
- labels
- Shrunk(SwissNameDictionary) {
+macro SwissNameDictionaryDelete(table: SwissNameDictionary, entry: intptr):
+ void labels Shrunk(SwissNameDictionary) {
const capacity = Convert<intptr>(table.capacity);
// Update present and deleted element counts at once, without needing to do
@@ -305,7 +302,7 @@ Found(intptr),
@export
macro SwissNameDictionaryAddSIMD(
table: SwissNameDictionary, key: Name, value: Object,
- propertyDetails: uint8) labels Bailout {
+ propertyDetails: uint8): void labels Bailout {
Add<GroupSse2Loader>(table, key, value, propertyDetails)
otherwise Bailout;
}
@@ -313,7 +310,7 @@ macro SwissNameDictionaryAddSIMD(
@export
macro SwissNameDictionaryAddPortable(
table: SwissNameDictionary, key: Name, value: Object,
- propertyDetails: uint8) labels Bailout {
+ propertyDetails: uint8): void labels Bailout {
Add<GroupPortableLoader>(table, key, value, propertyDetails)
otherwise Bailout;
}
diff --git a/deps/v8/src/objects/synthetic-module.h b/deps/v8/src/objects/synthetic-module.h
index a8b79fb0a0..cad81b3964 100644
--- a/deps/v8/src/objects/synthetic-module.h
+++ b/deps/v8/src/objects/synthetic-module.h
@@ -24,7 +24,6 @@ class SyntheticModule
public:
NEVER_READ_ONLY_SPACE
DECL_VERIFIER(SyntheticModule)
- DECL_PRINTER(SyntheticModule)
// Set module's exported value for the specified export_name to the specified
// export_value. An error will be thrown if export_name is not one
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 7faf9e9ac9..d9fc0bb102 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
diff --git a/deps/v8/src/objects/template-objects.tq b/deps/v8/src/objects/template-objects.tq
index 2aa657977f..63260bfd9c 100644
--- a/deps/v8/src/objects/template-objects.tq
+++ b/deps/v8/src/objects/template-objects.tq
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@generatePrint
extern class CachedTemplateObject extends Struct {
slot_id: Smi;
template_object: JSArray;
next: CachedTemplateObject|TheHole;
}
-@generatePrint
extern class TemplateObjectDescription extends Struct {
raw_strings: FixedArray;
cooked_strings: FixedArray;
diff --git a/deps/v8/src/objects/templates.tq b/deps/v8/src/objects/templates.tq
index 9406f62d7a..a3bb7a9e35 100644
--- a/deps/v8/src/objects/templates.tq
+++ b/deps/v8/src/objects/templates.tq
@@ -11,7 +11,6 @@ extern class TemplateInfo extends Struct {
property_accessors: TemplateList|Undefined;
}
-@generatePrint
extern class FunctionTemplateRareData extends Struct {
// See DECL_RARE_ACCESSORS in FunctionTemplateInfo.
prototype_template: ObjectTemplateInfo|Undefined;
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index e842e5ae66..91cc906013 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -326,7 +326,8 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
case kWeakRef: {
Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
PropertyDetails details = GetSimpleTargetDetails(target);
- if (details.location() != kField) return Handle<String>::null();
+ if (details.location() != PropertyLocation::kField)
+ return Handle<String>::null();
DCHECK_EQ(kData, details.kind());
if (details.attributes() != NONE) return Handle<String>::null();
Name name = GetSimpleTransitionKey(target);
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index 2bc8cf8697..0e76dc4e1b 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -270,7 +270,8 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
PropertyDetails details = target.GetLastDescriptorDetails(isolate_);
DCHECK_EQ(attributes, details.attributes());
DCHECK_EQ(kData, details.kind());
- if (requested_location == kFieldOnly && details.location() != kField) {
+ if (requested_location == kFieldOnly &&
+ details.location() != PropertyLocation::kField) {
return MaybeHandle<Map>();
}
return Handle<Map>(target, isolate_);
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index b80401ac36..a8c78404c4 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -15,6 +15,7 @@
#include "src/base/platform/wrappers.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
+#include "src/handles/global-handles-inl.h"
#include "src/handles/handles-inl.h"
#include "src/handles/maybe-handles-inl.h"
#include "src/heap/factory.h"
@@ -620,7 +621,8 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
Handle<Object> value;
if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
- if (V8_LIKELY(!map_changed && details.location() == kField)) {
+ if (V8_LIKELY(!map_changed &&
+ details.location() == PropertyLocation::kField)) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
value = JSObject::FastPropertyAt(object, details.representation(),
@@ -804,8 +806,8 @@ Maybe<bool> ValueSerializer::WriteJSPrimitiveWrapper(
void ValueSerializer::WriteJSRegExp(Handle<JSRegExp> regexp) {
WriteTag(SerializationTag::kRegExp);
- WriteString(handle(regexp->Pattern(), isolate_));
- WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
+ WriteString(handle(regexp->source(), isolate_));
+ WriteVarint(static_cast<uint32_t>(regexp->flags()));
}
Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
diff --git a/deps/v8/src/objects/visitors-inl.h b/deps/v8/src/objects/visitors-inl.h
new file mode 100644
index 0000000000..25186ac7f9
--- /dev/null
+++ b/deps/v8/src/objects/visitors-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VISITORS_INL_H_
+#define V8_OBJECTS_VISITORS_INL_H_
+
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/objects/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+ObjectVisitorWithCageBases::ObjectVisitorWithCageBases(
+ PtrComprCageBase cage_base, PtrComprCageBase code_cage_base)
+#if V8_COMPRESS_POINTERS
+ : cage_base_(cage_base)
+#ifdef V8_EXTERNAL_CODE_SPACE
+ ,
+ code_cage_base_(code_cage_base)
+#endif // V8_EXTERNAL_CODE_SPACE
+#endif // V8_COMPRESS_POINTERS
+{
+}
+
+ObjectVisitorWithCageBases::ObjectVisitorWithCageBases(Isolate* isolate)
+#if V8_COMPRESS_POINTERS
+ : ObjectVisitorWithCageBases(PtrComprCageBase(isolate->cage_base()),
+ PtrComprCageBase(isolate->code_cage_base()))
+#else
+ : ObjectVisitorWithCageBases(PtrComprCageBase(), PtrComprCageBase())
+#endif // V8_COMPRESS_POINTERS
+{
+}
+
+ObjectVisitorWithCageBases::ObjectVisitorWithCageBases(Heap* heap)
+ : ObjectVisitorWithCageBases(Isolate::FromHeap(heap)) {}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_VISITORS_INL_H_
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index d527cb0a9a..f065bb7147 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -174,6 +174,44 @@ class ObjectVisitor {
virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
};
+// Helper version of ObjectVisitor that also takes care of caching base values
+// of the main pointer compression cage and for the code cage.
+class ObjectVisitorWithCageBases : public ObjectVisitor {
+ public:
+ inline ObjectVisitorWithCageBases(PtrComprCageBase cage_base,
+ PtrComprCageBase code_cage_base);
+ inline explicit ObjectVisitorWithCageBases(Isolate* isolate);
+ inline explicit ObjectVisitorWithCageBases(Heap* heap);
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
+ // The pointer compression cage base value used for decompression of
+ // references to Code objects.
+ PtrComprCageBase code_cage_base() const {
+#if V8_EXTERNAL_CODE_SPACE
+ return code_cage_base_;
+#else
+ return cage_base();
+#endif // V8_EXTERNAL_CODE_SPACE
+ }
+
+ private:
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#ifdef V8_EXTERNAL_CODE_SPACE
+ const PtrComprCageBase code_cage_base_;
+#endif // V8_EXTERNAL_CODE_SPACE
+#endif // V8_COMPRESS_POINTERS
+};
+
} // namespace internal
} // namespace v8
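visitors.h/-inl.h introduce ObjectVisitorWithCageBases, which caches the main and (with the external code space enabled) code cage bases at construction so visitors can stop recomputing GetPtrComprCageBase() per slot; the heap-snapshot visitor below is the first user converted in this patch. A fragment of how a derived visitor would use it (illustrative only; the remaining ObjectVisitor overrides are elided and ProcessSlot is a hypothetical helper):

class ExampleVisitor : public ObjectVisitorWithCageBases {
 public:
  explicit ExampleVisitor(Isolate* isolate)
      : ObjectVisitorWithCageBases(isolate) {}

  void VisitMapPointer(HeapObject host) override {
    // Cached base instead of GetPtrComprCageBase(host) on every visit.
    ProcessSlot(cage_base(), host.map_slot());
  }
  // ... other ObjectVisitor overrides elided ...

 private:
  template <typename TSlot>
  void ProcessSlot(PtrComprCageBase cage_base, TSlot slot) { /* ... */ }
};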
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index ef2fb7ef3e..8093472eeb 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -1799,6 +1799,7 @@ bool ParserBase<Impl>::ValidateRegExpLiteral(const AstRawString* pattern,
// TODO(jgruber): If already validated in the preparser, skip validation in
// the parser.
DisallowGarbageCollection no_gc;
+ ZoneScope zone_scope(zone()); // Free regexp parser memory after use.
const unsigned char* d = pattern->raw_data();
if (pattern->is_one_byte()) {
return RegExp::VerifySyntax(zone(), stack_limit(),
@@ -3233,20 +3234,21 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
int prec) {
DCHECK_GE(prec, 4);
- ExpressionT x;
+
// "#foo in ShiftExpression" needs to be parsed separately, since private
// identifiers are not valid PrimaryExpressions.
if (V8_UNLIKELY(FLAG_harmony_private_brand_checks &&
peek() == Token::PRIVATE_NAME)) {
- x = ParsePropertyOrPrivatePropertyName();
- if (peek() != Token::IN) {
- ReportUnexpectedToken(peek());
+ ExpressionT x = ParsePropertyOrPrivatePropertyName();
+ int prec1 = Token::Precedence(peek(), accept_IN_);
+ if (peek() != Token::IN || prec1 < prec) {
+ ReportUnexpectedToken(Token::PRIVATE_NAME);
return impl()->FailureExpression();
}
- } else {
- x = ParseUnaryExpression();
+ return ParseBinaryContinuation(x, prec, prec1);
}
+ ExpressionT x = ParseUnaryExpression();
int prec1 = Token::Precedence(peek(), accept_IN_);
if (prec1 >= prec) {
return ParseBinaryContinuation(x, prec, prec1);
@@ -5321,7 +5323,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
body->set_scope(scope()->FinalizeBlockScope());
}
- body->InitializeStatements(statements, zone_);
+ body->InitializeStatements(statements, zone());
return body;
}
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 5699148c3b..c5cc0c8030 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -709,13 +709,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return arg;
}
- const AstRawString* PreParserIdentifierToAstRawString(
- const PreParserIdentifier& arg) {
- // This method definition is only needed due to an MSVC oddity that
- // instantiates the method despite it being unused. See crbug.com/v8/12266 .
- UNREACHABLE();
- }
-
IterationStatement* AsIterationStatement(BreakableStatement* s) {
return s->AsIterationStatement();
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index a4748f0c33..f090503c84 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -11,6 +11,7 @@
#include "include/v8-primitive.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-utils.h"
#include "src/handles/handles.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/objects-inl.h"
@@ -102,7 +103,7 @@ class ExternalStringStream {
ExternalStringStream(ExternalString string, size_t start_offset,
size_t length)
: lock_(string),
- data_(string.GetChars() + start_offset),
+ data_(string.GetChars(GetPtrComprCageBase(string)) + start_offset),
length_(length) {}
ExternalStringStream(const ExternalStringStream& other) V8_NOEXCEPT
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 9bba48521c..f228d79ad6 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -5,7 +5,7 @@
#include "src/profiler/allocation-tracker.h"
#include "src/execution/frames-inl.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/objects/objects-inl.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index cf4f549a39..829f2ab67f 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -40,9 +40,10 @@ class CpuSampler : public sampler::Sampler {
void SampleStack(const v8::RegisterState& regs) override {
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- if (v8::Locker::IsActive() && (!isolate->thread_manager()->IsLockedByThread(
- perThreadData_->thread_id()) ||
- perThreadData_->thread_state() != nullptr)) {
+ if (v8::Locker::WasEverUsed() &&
+ (!isolate->thread_manager()->IsLockedByThread(
+ perThreadData_->thread_id()) ||
+ perThreadData_->thread_state() != nullptr)) {
ProfilerStats::Instance()->AddReason(
ProfilerStats::Reason::kIsolateNotLocked);
return;
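The sampler switches from v8::Locker::IsActive() to v8::Locker::WasEverUsed(); given the v8-locker.h change in this same patch this looks like a rename/deprecation, and the condition the profiler relies on (whether a Locker has ever been used in the process) is unchanged. Embedder-side form of the query:

// Both are static queries on v8::Locker; the new name replaces the old one.
bool locker_ever_used = v8::Locker::WasEverUsed();  // previously IsActive()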
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 1144fdd15e..13c587dd76 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -604,7 +604,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
return AddEntry(object, HeapEntry::kClosure, "native_bind");
} else if (object.IsJSRegExp()) {
JSRegExp re = JSRegExp::cast(object);
- return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.Pattern()));
+ return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.source()));
} else if (object.IsJSObject()) {
const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
@@ -718,11 +718,12 @@ int V8HeapExplorer::EstimateObjectsCount() {
return objects_count;
}
-class IndexedReferencesExtractor : public ObjectVisitor {
+class IndexedReferencesExtractor : public ObjectVisitorWithCageBases {
public:
IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject parent_obj,
HeapEntry* parent)
- : generator_(generator),
+ : ObjectVisitorWithCageBases(generator->isolate()),
+ generator_(generator),
parent_obj_(parent_obj),
parent_start_(parent_obj_.RawMaybeWeakField(0)),
parent_end_(parent_obj_.RawMaybeWeakField(parent_obj_.Size())),
@@ -733,10 +734,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
void VisitMapPointer(HeapObject object) override {
- // TODO(v8:11880): support external code space (here object could be Code,
- // so the V8 heap cage_base must be used here).
- PtrComprCageBase cage_base = GetPtrComprCageBase(object);
- VisitSlotImpl(cage_base, object.map_slot());
+ VisitSlotImpl(cage_base(), object.map_slot());
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
@@ -744,17 +742,14 @@ class IndexedReferencesExtractor : public ObjectVisitor {
// all the slots must point inside the object.
CHECK_LE(parent_start_, start);
CHECK_LE(end, parent_end_);
- PtrComprCageBase cage_base = GetPtrComprCageBase(host);
for (MaybeObjectSlot slot = start; slot < end; ++slot) {
- VisitSlotImpl(cage_base, slot);
+ VisitSlotImpl(cage_base(), slot);
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // TODO(v8:11880): support external code space.
- PtrComprCageBase code_cage_base = GetPtrComprCageBase(host);
- VisitSlotImpl(code_cage_base, slot);
+ VisitSlotImpl(code_cage_base(), slot);
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
@@ -763,7 +758,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- HeapObject object = rinfo->target_object();
+ HeapObject object = rinfo->target_object_no_host(cage_base());
if (host.IsWeakObject(object)) {
generator_->SetWeakReference(parent_, next_index_++, object, {});
} else {
@@ -1428,7 +1423,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
for (InternalIndex i : js_obj.map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
- case kField: {
+ case PropertyLocation::kField: {
if (!snapshot_->capture_numeric_value()) {
Representation r = details.representation();
if (r.IsSmi() || r.IsDouble()) break;
@@ -1444,7 +1439,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
nullptr, field_offset);
break;
}
- case kDescriptor:
+ case PropertyLocation::kDescriptor:
SetDataOrAccessorPropertyReference(
details.kind(), entry, descs.GetKey(i), descs.GetStrongValue(i));
break;
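
The change above moves the per-slot GetPtrComprCageBase lookups into an ObjectVisitorWithCageBases base class that captures the cage bases once at construction. A minimal sketch of that caching pattern with made-up types (CageBase and VisitorWithCageBase here are stand-ins, not the V8 classes):

#include <cstdint>
#include <iostream>
#include <vector>

struct CageBase { uintptr_t value; };

// Captures the cage base once; derived visitors reuse the cached value on
// every visited slot instead of recomputing it per slot.
class VisitorWithCageBase {
 public:
  explicit VisitorWithCageBase(CageBase base) : cage_base_(base) {}
 protected:
  CageBase cage_base() const { return cage_base_; }
 private:
  const CageBase cage_base_;
};

class ReferenceExtractor : public VisitorWithCageBase {
 public:
  using VisitorWithCageBase::VisitorWithCageBase;
  // "Decompress" a 32-bit slot value against the cached cage base.
  uintptr_t VisitSlot(uint32_t compressed) const {
    return cage_base().value + compressed;
  }
};

int main() {
  ReferenceExtractor extractor(CageBase{0x10000000u});
  for (uint32_t slot : std::vector<uint32_t>{0x10, 0x20, 0x30})
    std::cout << std::hex << extractor.VisitSlot(slot) << "\n";
}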
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 1855aee53c..682a28773c 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -13,6 +13,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
+#include "src/execution/isolate.h"
#include "src/objects/fixed-array.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-object.h"
@@ -349,6 +350,8 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
V8HeapExplorer(const V8HeapExplorer&) = delete;
V8HeapExplorer& operator=(const V8HeapExplorer&) = delete;
+ V8_INLINE Isolate* isolate() { return Isolate::FromHeap(heap_); }
+
HeapEntry* AllocateEntry(HeapThing ptr) override;
HeapEntry* AllocateEntry(Smi smi) override;
int EstimateObjectsCount();
diff --git a/deps/v8/src/profiler/weak-code-registry.cc b/deps/v8/src/profiler/weak-code-registry.cc
index 2918e1ca82..961164d793 100644
--- a/deps/v8/src/profiler/weak-code-registry.cc
+++ b/deps/v8/src/profiler/weak-code-registry.cc
@@ -4,7 +4,8 @@
#include "src/profiler/weak-code-registry.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
+#include "src/objects/code-inl.h"
#include "src/objects/instance-type-inl.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 6c90e00817..f21ee023da 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -6,15 +6,13 @@
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#include "src/codegen/assembler-inl.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -40,14 +38,12 @@ namespace internal {
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[56] Address regexp (address of the JSRegExp object; unused in
+ * - fp[52] Address regexp (address of the JSRegExp object; unused in
* native code, passed to match signature of
* the interpreter)
- * - fp[52] Isolate* isolate (address of the current isolate)
- * - fp[48] direct_call (if 1, direct call from JavaScript code,
+ * - fp[48] Isolate* isolate (address of the current isolate)
+ * - fp[44] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[44] stack_area_base (high end of the memory area to use as
- * backtracking stack).
* - fp[40] capture array size (may fit multiple sets of matches)
* - fp[36] int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
@@ -84,7 +80,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate,
* Address regexp);
@@ -100,8 +95,10 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -110,15 +107,12 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
success_label_(),
backtrack_label_(),
exit_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -338,7 +332,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -619,6 +613,42 @@ void RegExpMacroAssemblerARM::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerARM::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, Operand(ref));
+ __ ldr(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerARM::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(scratch, Operand(ref));
+ __ str(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerARM::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ref));
+ __ ldr(scratch, MemOperand(scratch));
+ __ sub(scratch, stack_pointer, scratch);
+ __ str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerARM::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ ldr(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ mov(scratch, Operand(ref));
+ __ ldr(scratch, MemOperand(scratch));
+ __ add(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
+
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label return_r0;
@@ -630,7 +660,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -654,34 +684,47 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ push(r0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(r0); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(r0); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == r8);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), r1);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(r0, Operand(stack_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ sub(r0, sp, r0, SetCC);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ b(ls, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmp(r0, Operand(num_registers_ * kPointerSize));
+ __ b(hs, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(r0, Operand(EXCEPTION));
+ __ jmp(&return_r0);
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ sub(r0, sp, r0, SetCC);
- // Handle it if the stack pointer is already below the stack limit.
- __ b(ls, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(r0, Operand(num_registers_ * kPointerSize));
- __ b(hs, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState();
- __ cmp(r0, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &return_r0);
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState();
+ __ cmp(r0, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ b(ne, &return_r0);
- __ bind(&stack_ok);
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ AllocateStackSpace(num_registers_ * kPointerSize);
@@ -703,18 +746,21 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand::Zero());
- __ b(ne, &load_char_start_regexp);
- __ mov(current_character(), Operand('\n'), LeaveCC, eq);
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(r1, Operand::Zero());
+ __ b(ne, &load_char_start_regexp);
+ __ mov(current_character(), Operand('\n'), LeaveCC, eq);
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -735,9 +781,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
__ jmp(&start_label_);
// Exit code:
@@ -804,6 +847,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Prepare r0 to initialize registers with its value in the next run.
__ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r2);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// r4: capture start index
@@ -834,6 +881,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
}
__ bind(&return_r0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r2);
+
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@@ -851,12 +902,16 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r1);
+
CallCheckStackGuardState();
__ cmp(r0, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &return_r0);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
@@ -867,17 +922,18 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
- __ mov(r0, backtrack_stackpointer());
- __ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r1);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments);
+ __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ cmp(r0, Operand::Zero());
__ b(eq, &exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -984,14 +1040,24 @@ void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
__ ldr(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r1, Operand(ref));
+ __ ldr(r1, MemOperand(r1));
+ __ sub(r0, backtrack_stackpointer(), r1);
+ __ str(r0, register_location(reg));
+}
void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r0, Operand(ref));
+ __ ldr(r0, MemOperand(r0));
__ ldr(backtrack_stackpointer(), register_location(reg));
- __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
+ __ add(backtrack_stackpointer(), backtrack_stackpointer(), r0);
}
-
void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ cmp(current_input_offset(), Operand(-by * char_size()));
@@ -1037,14 +1103,6 @@ void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
- __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r0, backtrack_stackpointer(), r1);
- __ str(r0, register_location(reg));
-}
-
-
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index a02a4dc2af..478ed292ae 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -5,8 +5,6 @@
#ifndef V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#include "src/base/strings.h"
-#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -93,15 +91,13 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Register 4..11.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
@@ -115,8 +111,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -129,7 +131,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState();
@@ -138,27 +139,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return r6; }
+ static constexpr Register current_input_offset() { return r6; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r7; }
+ static constexpr Register current_character() { return r7; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
+ static constexpr Register end_of_input_address() { return r10; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return r8; }
+ static constexpr Register backtrack_stackpointer() { return r8; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return r5; }
+ static constexpr Register code_pointer() { return r5; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -178,19 +179,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
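
The new kRegExpStackBasePointer slot stores the backtrack stack pointer as an offset from the regexp stack's memory top, so the saved value stays valid if the stack is reallocated and moves while the regexp runs. A minimal sketch of the push/pop arithmetic performed by PushRegExpBasePointer and PopRegExpBasePointer, using a std::vector-backed stack as a stand-in for the real RegExpStack:

#include <cassert>
#include <cstddef>
#include <vector>

struct RegExpStackSketch {
  std::vector<char> memory;
  char* top() { return memory.data() + memory.size(); }
};

int main() {
  RegExpStackSketch stack;
  stack.memory.resize(64);
  char* stack_pointer = stack.top() - 16;  // current backtrack stack pointer

  // PushRegExpBasePointer: store the pointer as an offset from the memory top,
  // i.e. a position-independent representation.
  std::ptrdiff_t saved_offset = stack_pointer - stack.top();

  // The regexp stack grows; the backing store may move to a new address.
  stack.memory.resize(1024);

  // PopRegExpBasePointer: rebase the saved offset against the new memory top.
  stack_pointer = stack.top() + saved_offset;
  assert(stack_pointer == stack.top() - 16);
  return 0;
}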
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 67793ffc41..b0d55d4fe0 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -66,14 +66,12 @@ namespace internal {
* ^^^^^^^^^ fp ^^^^^^^^^
* - fp[-8] direct_call 1 => Direct call from JavaScript code.
* 0 => Call through the runtime system.
- * - fp[-16] stack_base High end of the memory area to use as
- * the backtracking stack.
- * - fp[-24] output_size Output may fit multiple sets of matches.
- * - fp[-32] input Handle containing the input string.
- * - fp[-40] success_counter
+ * - fp[-16] output_size Output may fit multiple sets of matches.
+ * - fp[-24] input Handle containing the input string.
+ * - fp[-32] success_counter
* ^^^^^^^^^^^^^ From here and downwards we store 32 bit values ^^^^^^^^^^^^^
- * - fp[-44] register N Capture registers initialized with
- * - fp[-48] register N + 1 non_position_value.
+ * - fp[-40] register N Capture registers initialized with
+ * - fp[-44] register N + 1 non_position_value.
* ... The first kNumCachedRegisters (N) registers
* ... are cached in x0 to x7.
* ... Only positions must be stored in the first
@@ -95,7 +93,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate,
* Address regexp);
@@ -111,8 +108,10 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -121,8 +120,6 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
success_label_(),
backtrack_label_(),
exit_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
// We can cache at most 16 W registers in x0-x7.
STATIC_ASSERT(kNumCachedRegisters <= 16);
@@ -134,7 +131,6 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
}
RegExpMacroAssemblerARM64::~RegExpMacroAssemblerARM64() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -194,7 +190,7 @@ void RegExpMacroAssemblerARM64::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- UseScratchRegisterScope temps(masm_);
+ UseScratchRegisterScope temps(masm_.get());
Register scratch = temps.AcquireW();
__ Ldr(scratch, MemOperand(frame_pointer(), kBacktrackCount));
__ Add(scratch, scratch, 1);
@@ -425,7 +421,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Mov(x3, ExternalReference::isolate_address(isolate()));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -699,6 +695,42 @@ void RegExpMacroAssemblerARM64::Fail() {
__ B(&exit_label_);
}
+void RegExpMacroAssemblerARM64::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ Mov(dst, ref);
+ __ Ldr(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerARM64::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ Mov(scratch, ref);
+ __ Str(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerARM64::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Mov(scratch, ref);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Sub(scratch, stack_pointer, scratch);
+ __ Str(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerARM64::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ldr(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ Mov(scratch, ref);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Add(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
Label return_w0;
@@ -715,15 +747,14 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// x3: byte* input_end
// x4: int* output array
// x5: int output array size
- // x6: Address stack_base
- // x7: int direct_call
-
- // sp[8]: address of the current isolate
- // sp[0]: secondary link/return address used by native call
+ // x6: int direct_call
+ // x7: Isolate* isolate
+ //
+ // sp[0]: secondary link/return address used by native call
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Push registers on the stack, only push the argument registers that we need.
CPURegList argument_registers(x0, x5, x6, x7);
@@ -744,52 +775,63 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Mov(input_end(), x3);
__ Mov(output_array(), x4);
- // Set the number of registers we will need to allocate, that is:
- // - kSuccessCounter / success_counter (X register)
- // - kBacktrackCount (X register)
- // - (num_registers_ - kNumCachedRegisters) (W registers)
- int num_wreg_to_allocate = num_registers_ - kNumCachedRegisters;
- // Do not allocate registers on the stack if they can all be cached.
- if (num_wreg_to_allocate < 0) { num_wreg_to_allocate = 0; }
- // Make room for the success_counter and kBacktrackCount. Each X (64-bit)
- // register is equivalent to two W (32-bit) registers.
- num_wreg_to_allocate += 2 + 2;
-
// Make sure the stack alignment will be respected.
- int alignment = masm_->ActivationFrameAlignment();
+ const int alignment = masm_->ActivationFrameAlignment();
DCHECK_EQ(alignment % 16, 0);
- int align_mask = (alignment / kWRegSize) - 1;
- num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
+ const int align_mask = (alignment / kWRegSize) - 1;
- // Check if we have space on the stack.
- Label stack_limit_hit;
- Label stack_ok;
+ // Make room for stack locals.
+ static constexpr int kWRegPerXReg = kXRegSize / kWRegSize;
+ DCHECK_EQ(kNumberOfStackLocals * kWRegPerXReg,
+ ((kNumberOfStackLocals * kWRegPerXReg) + align_mask) & ~align_mask);
+ __ Claim(kNumberOfStackLocals * kWRegPerXReg);
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ Mov(x10, stack_limit);
- __ Ldr(x10, MemOperand(x10));
- __ Subs(x10, sp, x10);
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == x23);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
- // Handle it if the stack pointer is already below the stack limit.
- __ B(ls, &stack_limit_hit);
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), x11);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
- __ B(hs, &stack_ok);
-
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Mov(w0, EXCEPTION);
- __ B(&return_w0);
+ // Set the number of registers we will need to allocate, that is:
+ // - (num_registers_ - kNumCachedRegisters) (W registers)
+ const int num_stack_registers =
+ std::max(0, num_registers_ - kNumCachedRegisters);
+ const int num_wreg_to_allocate =
+ (num_stack_registers + align_mask) & ~align_mask;
+
+ {
+ // Check if we have space on the stack.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ Mov(x10, stack_limit);
+ __ Ldr(x10, MemOperand(x10));
+ __ Subs(x10, sp, x10);
+
+ // Handle it if the stack pointer is already below the stack limit.
+ __ B(ls, &stack_limit_hit);
+
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Cmp(x10, num_wreg_to_allocate * kWRegSize);
+ __ B(hs, &stack_ok);
+
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Mov(w0, EXCEPTION);
+ __ B(&return_w0);
- __ Bind(&stack_limit_hit);
- CallCheckStackGuardState(x10);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Cbnz(w0, &return_w0);
+ __ Bind(&stack_limit_hit);
+ CallCheckStackGuardState(x10);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Cbnz(w0, &return_w0);
- __ Bind(&stack_ok);
+ __ Bind(&stack_ok);
+ }
// Allocate space on stack.
__ Claim(num_wreg_to_allocate, kWRegSize);
@@ -822,26 +864,27 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Initialize code pointer register.
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Cbnz(start_offset(), &load_char_start_regexp);
- __ Mov(current_character(), '\n');
- __ B(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Cbnz(start_offset(), &load_char_start_regexp);
+ __ Mov(current_character(), '\n');
+ __ B(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ Bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ Bind(&start_regexp);
+ }
- // Global regexp restarts matching here.
- __ Bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ Bind(&start_regexp);
// Initialize on-stack registers.
if (num_saved_registers_ > 0) {
ClearRegisters(0, num_saved_registers_ - 1);
}
- // Initialize backtrack stack pointer.
- __ Ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackBase));
-
- // Execute
+ // Execute.
__ B(&start_label_);
if (backtrack_label_.is_linked()) {
@@ -991,6 +1034,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Update output size on the frame before we restart matching.
__ Str(output_size, MemOperand(frame_pointer(), kOutputSize));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), x11);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
__ Cmp(current_input_offset(), first_capture_start);
@@ -1013,7 +1060,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
}
if (exit_label_.is_linked()) {
- // Exit and return w0
+ // Exit and return w0.
__ Bind(&exit_label_);
if (global()) {
__ Ldr(w0, MemOperand(frame_pointer(), kSuccessCounter));
@@ -1021,8 +1068,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
}
__ Bind(&return_w0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), x11);
- // Set stack pointer back to first register to retain
+ // Set stack pointer back to first register to retain.
__ Mov(sp, fp);
__ Pop<TurboAssembler::kAuthLR>(fp, lr);
@@ -1039,6 +1089,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
__ Bind(&check_preempt_label_);
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), x10);
+
SaveLinkRegister();
// The cached registers need to be retained.
__ PushCPURegList(cached_registers);
@@ -1048,26 +1101,30 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
__ Cbnz(w0, &return_w0);
// Reset the cached registers.
__ PopCPURegList(cached_registers);
+
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
RestoreLinkRegister();
__ Ret();
}
if (stack_overflow_label_.is_linked()) {
__ Bind(&stack_overflow_label_);
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), x10);
+
SaveLinkRegister();
// The cached registers need to be retained.
__ PushCPURegList(cached_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- __ Mov(x2, ExternalReference::isolate_address(isolate()));
- __ Add(x1, frame_pointer(), kStackBase);
- __ Mov(x0, backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, 3);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- // Returning from the regexp code restores the stack (sp <- fp)
- // so we don't need to drop the link register from it before exiting.
+ // Call GrowStack(isolate)
+ static constexpr int kNumArguments = 1;
+ __ Mov(x0, ExternalReference::isolate_address(isolate()));
+ __ CallCFunction(ExternalReference::re_grow_stack(isolate()),
+ kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception. Returning from the regexp code restores
+ // the stack (sp <- fp) so we don't need to drop the link register from it
+ // before exiting.
__ Cbz(w0, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ Mov(backtrack_stackpointer(), x0);
@@ -1191,14 +1248,29 @@ void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
}
}
+void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Mov(x10, ref);
+ __ Ldr(x10, MemOperand(x10));
+ __ Sub(x10, backtrack_stackpointer(), x10);
+ if (FLAG_debug_code) {
+ __ Cmp(x10, Operand(w10, SXTW));
+ // The stack offset needs to fit in a W register.
+ __ Check(eq, AbortReason::kOffsetOutOfRange);
+ }
+ StoreRegister(reg, w10);
+}
void RegExpMacroAssemblerARM64::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
Register read_from = GetRegister(reg, w10);
- __ Ldr(x11, MemOperand(frame_pointer(), kStackBase));
+ __ Mov(x11, ref);
+ __ Ldr(x11, MemOperand(x11));
__ Add(backtrack_stackpointer(), x11, Operand(read_from, SXTW));
}
-
void RegExpMacroAssemblerARM64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ Cmp(current_input_offset(), -by * char_size());
@@ -1300,19 +1372,6 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
- __ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
- __ Sub(x10, backtrack_stackpointer(), x10);
- if (FLAG_debug_code) {
- __ Cmp(x10, Operand(w10, SXTW));
- // The stack offset needs to fit in a W register.
- __ Check(eq, AbortReason::kOffsetOutOfRange);
- }
- StoreRegister(reg, w10);
-}
-
-
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
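
Because the backtrack stack pointer is now spilled to per-isolate memory before every external call, GrowStack only needs the isolate address instead of the old three arguments (stack pointer, stack-base slot, isolate). A rough sketch of the new calling convention with placeholder types (the Isolate struct and GrowStack below are illustrative stubs, not the V8 declarations):

#include <cstdint>
#include <iostream>

struct Isolate {
  uintptr_t regexp_stack_pointer = 0;  // spilled here by the generated code
};

// Stand-in for the GrowStack C function: with the stack pointer already in
// per-isolate memory, the isolate address is the only argument it needs.
// Returning 0 would signal "could not grow", which the generated code checks.
uintptr_t GrowStack(Isolate* isolate) {
  return isolate->regexp_stack_pointer;  // pretend-grown, possibly rebased
}

int main() {
  Isolate isolate;
  uintptr_t backtrack_sp = 0x1000;
  isolate.regexp_stack_pointer = backtrack_sp;  // StoreRegExpStackPointerToMemory
  uintptr_t new_sp = GrowStack(&isolate);       // CallCFunction(grow_stack, 1)
  std::cout << std::hex << new_sp << '\n';
  return new_sp == 0 ? 1 : 0;
}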
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 80931e3ca4..7b3d1b90e4 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -102,26 +102,32 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// Callee-saved registers (x19-x28).
static const int kNumCalleeSavedRegisters = 10;
static const int kCalleeSavedRegisters = kReturnAddress + kSystemPointerSize;
- // Stack parameter placed by caller.
- // It is placed above the FP, LR and the callee-saved registers.
- static const int kIsolate =
- kCalleeSavedRegisters + kNumCalleeSavedRegisters * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = -kSystemPointerSize;
- static const int kStackBase = kDirectCall - kSystemPointerSize;
- static const int kOutputSize = kStackBase - kSystemPointerSize;
+ static const int kIsolate = -kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kOutputSize = kDirectCall - kSystemPointerSize;
static const int kInput = kOutputSize - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessCounter = kInput - kSystemPointerSize;
static const int kBacktrackCount = kSuccessCounter - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+ // A padding slot to preserve alignment.
+ static const int kStackLocalPadding =
+ kRegExpStackBasePointer - kSystemPointerSize;
+ static constexpr int kNumberOfStackLocals = 4;
+
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
- static const int kFirstRegisterOnStack = kBacktrackCount - kWRegSize;
+ static const int kFirstRegisterOnStack = kStackLocalPadding - kWRegSize;
// A capture is a 64 bit value holding two position.
- static const int kFirstCaptureOnStack = kBacktrackCount - kXRegSize;
+ static const int kFirstCaptureOnStack = kStackLocalPadding - kXRegSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -152,43 +158,43 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// Register holding the current input position as negative offset from
// the end of the string.
- Register current_input_offset() { return w21; }
+ static constexpr Register current_input_offset() { return w21; }
// The register containing the current character after LoadCurrentCharacter.
- Register current_character() { return w22; }
+ static constexpr Register current_character() { return w22; }
// Register holding address of the end of the input string.
- Register input_end() { return x25; }
+ static constexpr Register input_end() { return x25; }
// Register holding address of the start of the input string.
- Register input_start() { return x26; }
+ static constexpr Register input_start() { return x26; }
// Register holding the offset from the start of the string where we should
// start matching.
- Register start_offset() { return w27; }
+ static constexpr Register start_offset() { return w27; }
// Pointer to the output array's first element.
- Register output_array() { return x28; }
+ static constexpr Register output_array() { return x28; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- Register backtrack_stackpointer() { return x23; }
+ static constexpr Register backtrack_stackpointer() { return x23; }
// Register holding pointer to the current code object.
- Register code_pointer() { return x20; }
+ static constexpr Register code_pointer() { return x20; }
// Register holding the value used for clearing capture registers.
- Register string_start_minus_one() { return w24; }
+ static constexpr Register string_start_minus_one() { return w24; }
// The top 32 bit of this register is used to store this value
// twice. This is used for clearing more than one register at a time.
- Register twice_non_position_value() { return x24; }
+ static constexpr Register twice_non_position_value() { return x24; }
// Byte size of chars in the string to match (decided by the Mode argument)
- int char_size() { return static_cast<int>(mode_); }
+ int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -254,19 +260,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
// This assumes that the state of the register is not STACKED.
inline Register GetCachedRegister(int register_index);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
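
The register accessors become static constexpr so that fixed register assignments can be verified at compile time, which is what the new STATIC_ASSERT(backtrack_stackpointer() == x23) in GetCode() relies on. A self-contained sketch of that idiom with a toy Register type (the real Register and x23 live in V8's codegen headers):

// Toy Register type for illustration only.
struct Register {
  int code;
  constexpr bool operator==(Register other) const { return code == other.code; }
};

inline constexpr Register x23{23};

class RegExpAssemblerSketch {
 public:
  // A static constexpr accessor is usable in constant expressions...
  static constexpr Register backtrack_stackpointer() { return x23; }
};

// ...which allows compile-time checks like the STATIC_ASSERT added in
// GetCode(). A non-static, non-constexpr member function could not be used here.
static_assert(RegExpAssemblerSketch::backtrack_stackpointer() == x23,
              "backtrack stack pointer must stay pinned to x23");

int main() { return 0; }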
diff --git a/deps/v8/src/regexp/experimental/experimental.cc b/deps/v8/src/regexp/experimental/experimental.cc
index c05a010d06..1e745eaa31 100644
--- a/deps/v8/src/regexp/experimental/experimental.cc
+++ b/deps/v8/src/regexp/experimental/experimental.cc
@@ -36,13 +36,13 @@ void ExperimentalRegExp::Initialize(Isolate* isolate, Handle<JSRegExp> re,
bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
DCHECK(FLAG_enable_experimental_regexp_engine);
- DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
+ DCHECK_EQ(re->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
re->JSRegExpVerify(isolate);
#endif
- return re->DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex) !=
- Smi::FromInt(JSRegExp::kUninitializedValue);
+ static constexpr bool kIsLatin1 = true;
+ return re->bytecode(kIsLatin1) != Smi::FromInt(JSRegExp::kUninitializedValue);
}
template <class T>
@@ -68,14 +68,14 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
Handle<JSRegExp> regexp) {
Zone zone(isolate->allocator(), ZONE_NAME);
- Handle<String> source(regexp->Pattern(), isolate);
+ Handle<String> source(regexp->source(), isolate);
// Parse and compile the regexp source.
RegExpCompileData parse_result;
DCHECK(!isolate->has_pending_exception());
bool parse_success = RegExpParser::ParseRegExpFromHeapString(
- isolate, &zone, source, JSRegExp::AsRegExpFlags(regexp->GetFlags()),
+ isolate, &zone, source, JSRegExp::AsRegExpFlags(regexp->flags()),
&parse_result);
if (!parse_success) {
// The pattern was already parsed successfully during initialization, so
@@ -87,7 +87,7 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
}
ZoneList<RegExpInstruction> bytecode = ExperimentalRegExpCompiler::Compile(
- parse_result.tree, JSRegExp::AsRegExpFlags(regexp->GetFlags()), &zone);
+ parse_result.tree, JSRegExp::AsRegExpFlags(regexp->flags()), &zone);
CompilationResult result;
result.bytecode = VectorToByteArray(isolate, bytecode.ToVector());
@@ -100,12 +100,12 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
DCHECK(FLAG_enable_experimental_regexp_engine);
- DCHECK_EQ(re->TypeTag(), JSRegExp::EXPERIMENTAL);
+ DCHECK_EQ(re->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
re->JSRegExpVerify(isolate);
#endif
- Handle<String> source(re->Pattern(), isolate);
+ Handle<String> source(re->source(), isolate);
if (FLAG_trace_experimental_regexp_engine) {
StdoutStream{} << "Compiling experimental regexp " << *source << std::endl;
}
@@ -117,16 +117,8 @@ bool ExperimentalRegExp::Compile(Isolate* isolate, Handle<JSRegExp> re) {
return false;
}
- re->SetDataAt(JSRegExp::kIrregexpLatin1BytecodeIndex,
- *compilation_result->bytecode);
- re->SetDataAt(JSRegExp::kIrregexpUC16BytecodeIndex,
- *compilation_result->bytecode);
-
- Handle<Code> trampoline = BUILTIN_CODE(isolate, RegExpExperimentalTrampoline);
- re->SetDataAt(JSRegExp::kIrregexpLatin1CodeIndex, ToCodeT(*trampoline));
- re->SetDataAt(JSRegExp::kIrregexpUC16CodeIndex, ToCodeT(*trampoline));
-
- re->SetCaptureNameMap(compilation_result->capture_name_map);
+ re->set_bytecode_and_trampoline(isolate, compilation_result->bytecode);
+ re->set_capture_name_map(compilation_result->capture_name_map);
return true;
}
@@ -177,23 +169,22 @@ int32_t ExperimentalRegExp::ExecRaw(Isolate* isolate,
DisallowGarbageCollection no_gc;
if (FLAG_trace_experimental_regexp_engine) {
- String source = String::cast(regexp.DataAt(JSRegExp::kSourceIndex));
- StdoutStream{} << "Executing experimental regexp " << source << std::endl;
+ StdoutStream{} << "Executing experimental regexp " << regexp.source()
+ << std::endl;
}
- ByteArray bytecode =
- ByteArray::cast(regexp.DataAt(JSRegExp::kIrregexpLatin1BytecodeIndex));
+ static constexpr bool kIsLatin1 = true;
+ ByteArray bytecode = ByteArray::cast(regexp.bytecode(kIsLatin1));
return ExecRawImpl(isolate, call_origin, bytecode, subject,
- regexp.CaptureCount(), output_registers,
+ regexp.capture_count(), output_registers,
output_register_count, subject_index);
}
int32_t ExperimentalRegExp::MatchForCallFromJs(
Address subject, int32_t start_position, Address input_start,
Address input_end, int* output_registers, int32_t output_register_count,
- Address backtrack_stack, RegExp::CallOrigin call_origin, Isolate* isolate,
- Address regexp) {
+ RegExp::CallOrigin call_origin, Isolate* isolate, Address regexp) {
DCHECK(FLAG_enable_experimental_regexp_engine);
DCHECK_NOT_NULL(isolate);
DCHECK_NOT_NULL(output_registers);
@@ -217,7 +208,7 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
int subject_index, Handle<RegExpMatchInfo> last_match_info,
RegExp::ExecQuirks exec_quirks) {
DCHECK(FLAG_enable_experimental_regexp_engine);
- DCHECK_EQ(regexp->TypeTag(), JSRegExp::EXPERIMENTAL);
+ DCHECK_EQ(regexp->type_tag(), JSRegExp::EXPERIMENTAL);
#ifdef VERIFY_HEAP
regexp->JSRegExpVerify(isolate);
#endif
@@ -231,7 +222,7 @@ MaybeHandle<Object> ExperimentalRegExp::Exec(
subject = String::Flatten(isolate, subject);
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int output_register_count = JSRegExp::RegistersForCaptureCount(capture_count);
int32_t* output_registers;
@@ -275,7 +266,7 @@ int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate,
if (FLAG_trace_experimental_regexp_engine) {
StdoutStream{} << "Experimental execution (oneshot) of regexp "
- << regexp->Pattern() << std::endl;
+ << regexp->source() << std::endl;
}
base::Optional<CompilationResult> compilation_result =
@@ -285,7 +276,7 @@ int32_t ExperimentalRegExp::OneshotExecRaw(Isolate* isolate,
DisallowGarbageCollection no_gc;
return ExecRawImpl(isolate, RegExp::kFromRuntime,
*compilation_result->bytecode, *subject,
- regexp->CaptureCount(), output_registers,
+ regexp->capture_count(), output_registers,
output_register_count, subject_index);
}
@@ -294,9 +285,9 @@ MaybeHandle<Object> ExperimentalRegExp::OneshotExec(
int subject_index, Handle<RegExpMatchInfo> last_match_info,
RegExp::ExecQuirks exec_quirks) {
DCHECK(FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
- DCHECK_NE(regexp->TypeTag(), JSRegExp::NOT_COMPILED);
+ DCHECK_NE(regexp->type_tag(), JSRegExp::NOT_COMPILED);
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int output_register_count = JSRegExp::RegistersForCaptureCount(capture_count);
int32_t* output_registers;
diff --git a/deps/v8/src/regexp/experimental/experimental.h b/deps/v8/src/regexp/experimental/experimental.h
index 5987fb4d77..cdc683e97e 100644
--- a/deps/v8/src/regexp/experimental/experimental.h
+++ b/deps/v8/src/regexp/experimental/experimental.h
@@ -34,7 +34,6 @@ class ExperimentalRegExp final : public AllStatic {
Address input_start, Address input_end,
int* output_registers,
int32_t output_register_count,
- Address backtrack_stack,
RegExp::CallOrigin call_origin,
Isolate* isolate, Address regexp);
static MaybeHandle<Object> Exec(
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 6af1d02eed..913f704b33 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -40,8 +40,6 @@ namespace internal {
* - Isolate* isolate (address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
* - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (address of end of string)
@@ -74,7 +72,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate
* Address regexp);
@@ -88,8 +85,10 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -98,16 +97,12 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
success_label_(),
backtrack_label_(),
exit_label_() {
- // Irregexp code clobbers ebx and spills/restores it at all boundaries.
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -339,7 +334,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ mov(Operand(esp, 0 * kSystemPointerSize), edx);
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference compare =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -655,6 +650,38 @@ void RegExpMacroAssemblerIA32::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerIA32::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, __ ExternalReferenceAsOperand(ref, dst));
+}
+
+void RegExpMacroAssemblerIA32::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(__ ExternalReferenceAsOperand(ref, scratch), src);
+}
+
+void RegExpMacroAssemblerIA32::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, __ ExternalReferenceAsOperand(ref, scratch));
+ __ sub(scratch, stack_pointer);
+ __ mov(Operand(ebp, kRegExpStackBasePointer), scratch);
+}
+
+void RegExpMacroAssemblerIA32::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ebp, kRegExpStackBasePointer));
+ __ mov(stack_pointer_out,
+ __ ExternalReferenceAsOperand(ref, stack_pointer_out));
+ __ sub(stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label return_eax;
@@ -666,7 +693,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
__ push(ebp);
@@ -676,41 +703,59 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(esi);
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
+ STATIC_ASSERT(kLastCalleeSaveRegister == kBackup_ebx);
- STATIC_ASSERT(kSuccessfulCaptures == kBackup_ebx - kSystemPointerSize);
+ STATIC_ASSERT(kSuccessfulCaptures ==
+ kLastCalleeSaveRegister - kSystemPointerSize);
__ push(Immediate(0)); // Number of successful matches in a global regexp.
STATIC_ASSERT(kStringStartMinusOne ==
kSuccessfulCaptures - kSystemPointerSize);
__ push(Immediate(0)); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(Immediate(0)); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(Immediate(0)); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is *not* callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == ecx);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), eax);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(eax, esp);
+ __ sub(eax, StaticVariable(stack_limit));
+ // Handle it if the stack pointer is already below the stack limit.
+ __ j(below_equal, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmp(eax, num_registers_ * kSystemPointerSize);
+ __ j(above_equal, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(eax, EXCEPTION);
+ __ jmp(&return_eax);
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
+ __ bind(&stack_limit_hit);
+ __ push(backtrack_stackpointer());
+ CallCheckStackGuardState(ebx);
+ __ pop(backtrack_stackpointer());
+ __ or_(eax, eax);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ j(not_zero, &return_eax);
+
+ __ bind(&stack_ok);
+ }
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(ecx, esp);
- __ sub(ecx, StaticVariable(stack_limit));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(ecx, num_registers_ * kSystemPointerSize);
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_eax);
-
- __ bind(&stack_ok);
// Load start index for later use.
__ mov(ebx, Operand(ebp, kStartIndex));
@@ -735,18 +780,22 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kStringStartMinusOne), eax);
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ mov(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+
+ // Load newline if index is at start, previous character otherwise.
+ __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ __ mov(current_character(), '\n');
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -754,6 +803,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
+ DCHECK_EQ(ecx, backtrack_stackpointer());
+ __ push(ecx);
__ mov(ecx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
@@ -761,6 +812,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ sub(ecx, Immediate(kSystemPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kSystemPointerSize);
__ j(greater, &init_loop);
+ __ pop(ecx);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(register_location(i), eax);
@@ -768,9 +820,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-
__ jmp(&start_label_);
// Exit code:
@@ -823,6 +872,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Prepare eax to initialize registers with its value in the next run.
__ mov(eax, Operand(ebp, kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), ebx);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// edx: capture start index
@@ -855,8 +908,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
__ bind(&return_eax);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), ebx);
+
// Skip esp past regexp registers.
- __ lea(esp, Operand(ebp, kBackup_ebx));
+ __ lea(esp, Operand(ebp, kLastCalleeSaveRegister));
// Restore callee-save registers.
__ pop(ebx);
__ pop(edi);
@@ -877,7 +934,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ push(backtrack_stackpointer());
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), edi);
+
__ push(edi);
CallCheckStackGuardState(ebx);
@@ -887,7 +945,9 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ j(not_zero, &return_eax);
__ pop(edi);
- __ pop(backtrack_stackpointer());
+
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload esi from frame.
__ mov(esi, Operand(ebp, kInputEnd));
SafeReturn();
@@ -898,21 +958,19 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- // Save registers before calling C function
+ // Save registers before calling C function.
__ push(esi);
__ push(edi);
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, ebx);
- __ mov(Operand(esp, 2 * kSystemPointerSize),
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), edi);
+
+ // Call GrowStack(isolate).
+ static const int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, ebx);
+ __ mov(Operand(esp, 0 * kSystemPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
- __ lea(eax, Operand(ebp, kStackHighEnd));
- __ mov(Operand(esp, 1 * kSystemPointerSize), eax);
- __ mov(Operand(esp, 0 * kSystemPointerSize), backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(ExternalReference::re_grow_stack(isolate()),
+ kNumArguments);
// If return nullptr, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ or_(eax, eax);
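
The backtrack-stack overflow path also changes its C calling convention: instead of passing the stack pointer and &stack_base as explicit arguments, the generated code first publishes its stack pointer via StoreRegExpStackPointerToMemory and then calls GrowStack with only the isolate; as the MIPS and LOONG64 hunks below show, a non-null return value is the (possibly relocated) stack pointer and nullptr still means growth failed. A hypothetical sketch of this hand-off style, with invented names and the relocation logic elided:

    #include <cstdint>

    // Invented types for illustration only; not V8's Isolate or GrowStack.
    struct IsolateSketch {
      uintptr_t regexp_stack_pointer;  // slot the generated code writes before the call
    };

    uintptr_t GrowBacktrackStackSketch(IsolateSketch* isolate) {
      uintptr_t old_sp = isolate->regexp_stack_pointer;
      // A real implementation would enlarge the backing store here, translate
      // old_sp into the new block, and return 0 if the allocation failed.
      uintptr_t new_sp = old_sp;  // relocation elided in this sketch
      isolate->regexp_stack_pointer = new_sp;
      return new_sp;  // caller installs this as the new backtrack stack pointer
    }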
@@ -1019,10 +1077,21 @@ void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
__ mov(edi, register_location(reg));
}
+void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(eax, __ ExternalReferenceAsOperand(stack_top_address, eax));
+ __ sub(eax, backtrack_stackpointer());
+ __ mov(register_location(reg), eax);
+}
void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
- __ mov(backtrack_stackpointer(), register_location(reg));
- __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(backtrack_stackpointer(),
+ __ ExternalReferenceAsOperand(stack_top_address,
+ backtrack_stackpointer()));
+ __ sub(backtrack_stackpointer(), register_location(reg));
}
void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
@@ -1069,14 +1138,6 @@ void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
- __ mov(eax, backtrack_stackpointer());
- __ sub(eax, Operand(ebp, kStackHighEnd));
- __ mov(register_location(reg), eax);
-}
-
-
// Private methods:
void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 93fb2c9aba..30275036dd 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -105,8 +105,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
- static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kSystemPointerSize;
static const int kIsolate = kDirectCall + kSystemPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
@@ -114,12 +113,20 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
static const int kBackup_esi = kFramePointer - kSystemPointerSize;
static const int kBackup_edi = kBackup_esi - kSystemPointerSize;
static const int kBackup_ebx = kBackup_edi - kSystemPointerSize;
- static const int kSuccessfulCaptures = kBackup_ebx - kSystemPointerSize;
+ static const int kLastCalleeSaveRegister = kBackup_ebx;
+
+ static const int kSuccessfulCaptures =
+ kLastCalleeSaveRegister - kSystemPointerSize;
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
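
Assuming kFramePointer == 0 (as in the MIPS and LOONG64 headers further down) and kSystemPointerSize == 4 on ia32, the local-slot chain works out to:

    kBackup_esi             = -4
    kBackup_edi             = -8
    kBackup_ebx             = kLastCalleeSaveRegister = -12
    kSuccessfulCaptures     = -16
    kStringStartMinusOne    = -20
    kBacktrackCount         = -24
    kRegExpStackBasePointer = -28
    kRegisterZero           = -32

so the new slot simply pushes kRegisterZero down by one word, and GetCode reserves it with the extra push(Immediate(0)) guarded by the matching STATIC_ASSERT above.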
@@ -137,14 +144,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
Operand register_location(int register_index);
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return edx; }
+ static constexpr Register current_character() { return edx; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return ecx; }
+ static constexpr Register backtrack_stackpointer() { return ecx; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
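
Turning the register accessors into static constexpr functions makes them usable in constant expressions, which is what lets GetCode pin the register assignment with STATIC_ASSERT(backtrack_stackpointer() == ecx) above. A minimal illustration with a stand-in register type, not V8's Register class:

    struct Reg {
      int code;
    };
    constexpr bool operator==(Reg a, Reg b) { return a.code == b.code; }

    struct FrameSketch {
      // A non-constexpr member function could not appear in a static_assert.
      static constexpr Reg backtrack_stackpointer() { return Reg{1}; }
    };

    static_assert(FrameSketch::backtrack_stackpointer() == Reg{1},
                  "register choice checked at compile time");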
@@ -168,19 +175,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
// (ecx) and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ // are always 0..num_saved_registers_-1).
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
index d95a6e7d60..0c2b83ba88 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -6,14 +6,12 @@
#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -48,19 +46,18 @@ namespace internal {
* - fp[0..63] s0..s7 Callee-saved registers s0..s7.
* --- frame pointer ----
* - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
- * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
- * - fp[-40] end of input (address of end of string). kInputEnd
- * - fp[-48] start of input (address of first character in string). kInputStart
- * - fp[-56] start index (character index of start). kStartIndex
- * - fp[-64] void* input_string (location of a handle containing the string). kInputString
- * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
+ * - fp[-16] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-24] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-32] end of input (address of end of string). kInputEnd
+ * - fp[-40] start of input (address of first character in string). kInputStart
+ * - fp[-48] start index (character index of start). kStartIndex
+ * - fp[-56] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-64] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-72] Offset of location before start of input (effectively character kStringStartMinusOne
* position -1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
- * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - fp[-80] register 0 (Only positions must be stored in the first kRegisterZero
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -79,7 +76,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
@@ -96,8 +92,10 @@ RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -107,8 +105,6 @@ RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -120,7 +116,6 @@ RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate,
}
RegExpMacroAssemblerLOONG64::~RegExpMacroAssemblerLOONG64() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -336,7 +331,7 @@ void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -592,6 +587,44 @@ void RegExpMacroAssemblerLOONG64::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerLOONG64::LoadRegExpStackPointerFromMemory(
+ Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, ref);
+ __ Ld_d(dst, MemOperand(dst, 0));
+}
+
+void RegExpMacroAssemblerLOONG64::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, ref);
+ __ St_d(src, MemOperand(scratch, 0));
+}
+
+void RegExpMacroAssemblerLOONG64::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, ref);
+ __ Ld_d(scratch2, MemOperand(scratch2, 0));
+ __ Sub_d(scratch2, scratch1, scratch2);
+ __ St_d(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerLOONG64::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ld_d(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, ref);
+ __ Ld_d(scratch2, MemOperand(scratch2, 0));
+ __ Add_d(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
+
Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
Label return_v0;
if (0 /* todo masm_->has_exception()*/) {
@@ -608,7 +641,7 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL,
  // no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -637,6 +670,13 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
__ Push(a0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ Push(a0); // The backtrack counter
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ Push(a0); // The regexp stack base ptr.
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -717,8 +757,7 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ Ld_d(backtrack_stackpointer(),
- MemOperand(frame_pointer(), kStackHighEnd));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
__ jmp(&start_label_);
@@ -820,6 +859,10 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
}
__ bind(&return_v0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a1, a2);
+
  // Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
@@ -838,6 +881,8 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
// Put regexp engine registers on stack.
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
+
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() |
backtrack_stackpointer().bit();
@@ -848,34 +893,36 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// result as return value.
__ Branch(&return_v0, ne, a0, Operand(zero_reg));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
SafeReturn();
}
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
// Reached if the backtrack-stack limit has been hit.
// Put regexp engine registers on stack first.
RegList regexp_registers =
current_input_offset().bit() | current_character().bit();
__ MultiPush(regexp_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mov(a0, backtrack_stackpointer());
- __ Add_d(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ // Call GrowStack(isolate).
+ static const int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, kNumArguments);
// Restore regexp registers.
__ MultiPop(regexp_registers);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), a0);
@@ -956,7 +1003,7 @@ void RegExpMacroAssemblerLOONG64::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
@@ -991,9 +1038,21 @@ void RegExpMacroAssemblerLOONG64::ReadCurrentPositionFromRegister(int reg) {
__ Ld_d(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerLOONG64::WriteStackPointerToRegister(int reg) {
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, stack_top_address);
+ __ Ld_d(a0, MemOperand(a0, 0));
+ __ Sub_d(a0, backtrack_stackpointer(), a0);
+ __ St_d(a0, register_location(reg));
+}
+
void RegExpMacroAssemblerLOONG64::ReadStackPointerFromRegister(int reg) {
- __ Ld_d(backtrack_stackpointer(), register_location(reg));
- __ Ld_d(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(backtrack_stackpointer(), stack_top_address);
+ __ Ld_d(backtrack_stackpointer(), MemOperand(backtrack_stackpointer(), 0));
+ __ Ld_d(a0, register_location(reg));
__ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
}
@@ -1038,12 +1097,6 @@ void RegExpMacroAssemblerLOONG64::ClearRegisters(int reg_from, int reg_to) {
}
}
-void RegExpMacroAssemblerLOONG64::WriteStackPointerToRegister(int reg) {
- __ Ld_d(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Sub_d(a0, backtrack_stackpointer(), a1);
- __ St_d(a0, register_location(reg));
-}
-
// Private methods:
void RegExpMacroAssemblerLOONG64::CallCheckStackGuardState(Register scratch) {
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
index ea567543db..4f1e3217fa 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
@@ -5,8 +5,6 @@
#ifndef V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
#define V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
-#include "src/base/strings.h"
-#include "src/codegen/loong64/assembler-loong64.h"
#include "src/codegen/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -93,35 +91,39 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
- static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ static const int kReturnAddress = kStoredRegisters + 9 * kSystemPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress;
- // Stack parameters placed by caller.
- static const int kIsolate = kStackFrameHeader + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kPointerSize;
- static const int kStackHighEnd = kDirectCall - kPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
- static const int kInputEnd = kRegisterOutput - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
+ static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
+ static const int kInputStart = kInputEnd - kSystemPointerSize;
+ static const int kStartIndex = kInputStart - kSystemPointerSize;
+ static const int kInputString = kStartIndex - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -140,24 +142,24 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return a6; }
+ static constexpr Register current_input_offset() { return a6; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return a7; }
+ static constexpr Register current_character() { return a7; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t2; }
+ static constexpr Register end_of_input_address() { return t2; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return t0; }
+ static constexpr Register backtrack_stackpointer() { return t0; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return a5; }
+ static constexpr Register code_pointer() { return a5; }
// Byte size of chars in the string to match (decided by the Mode argument).
inline int char_size() { return static_cast<int>(mode_); }
@@ -182,19 +184,26 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index db4f2480b8..5dee159beb 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -6,14 +6,12 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips/assembler-mips-inl.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -39,11 +37,9 @@ namespace internal {
*
* The stack will have the following structure:
*
- * - fp[60] Isolate* isolate (address of the current isolate)
- * - fp[56] direct_call (if 1, direct call from JavaScript code,
+ * - fp[56] Isolate* isolate (address of the current isolate)
+ * - fp[52] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[52] stack_area_base (High end of the memory area to use as
- * backtracking stack).
* - fp[48] capture array size (may fit multiple sets of matches)
* - fp[44] int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
@@ -80,7 +76,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
@@ -95,8 +90,10 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -106,8 +103,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -119,7 +114,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -342,7 +336,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -607,6 +601,42 @@ void RegExpMacroAssemblerMIPS::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerMIPS::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, Operand(ref));
+ __ Lw(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerMIPS::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, Operand(ref));
+ __ Sw(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, Operand(ref));
+ __ Lw(scratch2, MemOperand(scratch2));
+ __ Subu(scratch2, scratch1, scratch2);
+ __ Sw(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Lw(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, Operand(ref));
+ __ Lw(scratch2, MemOperand(scratch2));
+ __ Addu(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
Label return_v0;
@@ -624,7 +654,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL,
// no is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -648,6 +678,13 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ push(a0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(a0);
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(a0); // The regexp stack base ptr.
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -728,7 +765,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
__ jmp(&start_label_);
@@ -830,6 +867,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
__ bind(&return_v0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a0, a1);
+
  // Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
@@ -847,6 +888,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Put regexp engine registers on stack.
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() | backtrack_stackpointer().bit();
@@ -857,6 +899,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// result as return value.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -866,25 +910,24 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Reached if the backtrack-stack limit has been hit.
// Put regexp engine registers on stack first.
RegList regexp_registers = current_input_offset().bit() |
current_character().bit();
__ MultiPush(regexp_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mov(a0, backtrack_stackpointer());
- __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ // Call GrowStack(isolate).
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, kNumArguments);
// Restore regexp registers.
__ MultiPop(regexp_registers);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
@@ -976,7 +1019,7 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
@@ -1013,10 +1056,21 @@ void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
__ lw(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Lw(a0, MemOperand(a0));
+ __ Subu(a0, backtrack_stackpointer(), a0);
+ __ Sw(a0, register_location(reg));
+}
void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Lw(a0, MemOperand(a0));
__ lw(backtrack_stackpointer(), register_location(reg));
- __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
__ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
}
@@ -1068,14 +1122,6 @@ void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
- __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Subu(a0, backtrack_stackpointer(), a1);
- __ sw(a0, register_location(reg));
-}
-
-
bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
return false;
}
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 9f85d94d65..ac69bd7a0f 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/mips/assembler-mips.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -94,7 +92,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
@@ -103,8 +100,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
@@ -118,8 +114,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -130,7 +132,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
@@ -139,27 +140,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return t2; }
+ static constexpr Register current_input_offset() { return t2; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return t3; }
+ static constexpr Register current_character() { return t3; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t6; }
+ static constexpr Register end_of_input_address() { return t6; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return t4; }
+ static constexpr Register backtrack_stackpointer() { return t4; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return t1; }
+ static constexpr Register code_pointer() { return t1; }
// Byte size of chars in the string to match (decided by the Mode argument).
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -185,19 +186,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 7e3ab11a46..eb69ad7807 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -6,14 +6,13 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips64/assembler-mips64-inl.h"
+#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -83,19 +82,18 @@ namespace internal {
* - fp[0..63] s0..s7 Callee-saved registers s0..s7.
* --- frame pointer ----
* - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
- * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
- * - fp[-40] end of input (address of end of string). kInputEnd
- * - fp[-48] start of input (address of first character in string). kInputStart
- * - fp[-56] start index (character index of start). kStartIndex
- * - fp[-64] void* input_string (location of a handle containing the string). kInputString
- * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
+ * - fp[-16] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-24] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-32] end of input (address of end of string). kInputEnd
+ * - fp[-40] start of input (address of first character in string). kInputStart
+ * - fp[-48] start index (character index of start). kStartIndex
+ * - fp[-56] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-64] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-72] Offset of location before start of input (effectively character kStringStartMinusOne
* position -1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
- * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - fp[-80] register 0 (Only positions must be stored in the first kRegisterZero
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -114,7 +112,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
@@ -131,8 +128,10 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -142,8 +141,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -155,7 +152,6 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -378,7 +374,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -637,6 +633,42 @@ void RegExpMacroAssemblerMIPS::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerMIPS::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, Operand(ref));
+ __ Ld(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerMIPS::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, Operand(ref));
+ __ Sd(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, Operand(ref));
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Dsubu(scratch2, scratch1, scratch2);
+ __ Sd(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ld(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, Operand(ref));
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Daddu(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
Label return_v0;
@@ -654,7 +686,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL,
  // no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -683,6 +715,13 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ push(a0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(a0); // The backtrack counter
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(a0); // The regexp stack base ptr.
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -763,7 +802,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ Ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
__ jmp(&start_label_);
@@ -866,6 +905,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
__ bind(&return_v0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a0, a1);
+
  // Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
@@ -883,6 +926,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
+
// Put regexp engine registers on stack.
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() | backtrack_stackpointer().bit();
@@ -893,6 +938,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// result as return value.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -902,25 +949,24 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Reached if the backtrack-stack limit has been hit.
// Put regexp engine registers on stack first.
RegList regexp_registers = current_input_offset().bit() |
current_character().bit();
__ MultiPush(regexp_registers);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mov(a0, backtrack_stackpointer());
- __ Daddu(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ // Call GrowStack(isolate)
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
+ __ CallCFunction(grow_stack, kNumArguments);
// Restore regexp registers.
__ MultiPop(regexp_registers);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
@@ -1012,7 +1058,7 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ Branch(&after_constant);
int offset = masm_->pc_offset();
@@ -1049,14 +1095,24 @@ void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
__ Ld(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Ld(a0, MemOperand(a0));
+ __ Dsubu(a0, backtrack_stackpointer(), a0);
+ __ Sd(a0, register_location(reg));
+}
void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, Operand(ref));
+ __ Ld(a0, MemOperand(a0));
__ Ld(backtrack_stackpointer(), register_location(reg));
- __ Ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
__ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
}
-
void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ Branch(&after_position,
@@ -1104,14 +1160,6 @@ void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
- __ Ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Dsubu(a0, backtrack_stackpointer(), a1);
- __ Sd(a0, register_location(reg));
-}
-
-
bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
return false;
}
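
The MIPS64 changes above stop caching the stack high end in the frame (kStackHighEnd) and instead spill the backtrack stack pointer as an offset from the regexp stack's memory top, so a spilled value stays valid even if the stack memory is reallocated while native code is suspended. The following standalone sketch (plain C++ with a hypothetical GrowableStack type, not part of the patch) illustrates the same position-independent save/restore idea:

// Position-independent spilling of a stack pointer, assuming a stack that
// grows downward from top() and whose backing memory may move when grown.
// The relocation of live stack contents done by the real RegExpStack is
// omitted; only the pointer arithmetic is shown.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

class GrowableStack {
 public:
  explicit GrowableStack(size_t size) : mem_(size) {}
  uint8_t* top() { return mem_.data() + mem_.size(); }  // high end of memory
  void Grow() { mem_.resize(mem_.size() * 2); }         // data() may change
 private:
  std::vector<uint8_t> mem_;
};

int main() {
  GrowableStack stack(64);
  uint8_t* sp = stack.top() - 16;        // live backtrack stack pointer
  // Spill as an offset from the memory top (cf. WriteStackPointerToRegister).
  ptrdiff_t spilled = sp - stack.top();
  stack.Grow();                          // backing memory may have moved
  // Reload relative to the new top (cf. ReadStackPointerFromRegister).
  sp = stack.top() + spilled;
  assert(stack.top() - sp == 16);
  return 0;
}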
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 348d52724b..a6a56235cf 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
#define V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/mips64/assembler-mips64.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -96,35 +94,39 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
  // TODO(plind): This 9 is 8 s-regs (s0..s7) plus fp.
- static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ static const int kReturnAddress = kStoredRegisters + 9 * kSystemPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress;
- // Stack parameters placed by caller.
- static const int kIsolate = kStackFrameHeader + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kPointerSize;
- static const int kStackHighEnd = kDirectCall - kPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
- static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
- static const int kInputEnd = kRegisterOutput - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
+ static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
+ static const int kInputStart = kInputEnd - kSystemPointerSize;
+ static const int kStartIndex = kInputStart - kSystemPointerSize;
+ static const int kInputString = kStartIndex - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
+ static const int kStringStartMinusOne =
+ kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -144,27 +146,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return a6; }
+ static constexpr Register current_input_offset() { return a6; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return a7; }
+ static constexpr Register current_character() { return a7; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t2; }
+ static constexpr Register end_of_input_address() { return t2; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return t0; }
+ static constexpr Register backtrack_stackpointer() { return t0; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return a5; }
+ static constexpr Register code_pointer() { return a5; }
// Byte size of chars in the string to match (decided by the Mode argument).
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -190,19 +192,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
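
The header diff also switches the port to owning its MacroAssembler through a const std::unique_ptr and to establishing the NoRootArrayScope in the member initializer list rather than calling set_root_array_available(false) in the constructor body. Below is a generic sketch of that ownership pattern; the Assembler and scope types are stand-ins, not the real V8 codegen classes:

#include <memory>

struct Assembler {
  void set_root_array_available(bool) {}
};

// RAII scope that configures the assembler for the owner's whole lifetime.
struct NoRootArrayScope {
  explicit NoRootArrayScope(Assembler* masm) {
    masm->set_root_array_available(false);
  }
};

class RegExpCodeGen {
 public:
  RegExpCodeGen()
      : masm_(std::make_unique<Assembler>()),
        no_root_array_scope_(masm_.get()) {}  // masm_ is constructed first
  // No user-written destructor: ~unique_ptr releases the assembler, so the
  // explicit `delete masm_` from the old code is no longer needed.

 private:
  // Declaration order matters: masm_ must precede the scope that uses it.
  const std::unique_ptr<Assembler> masm_;
  const NoRootArrayScope no_root_array_scope_;
};

int main() {
  RegExpCodeGen gen;
  (void)gen;
  return 0;
}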
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index bb82c270b7..b7347e5fdf 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -6,14 +6,13 @@
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
-#include "src/base/bits.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -100,8 +99,10 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -111,8 +112,6 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
@@ -126,7 +125,6 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerPPC::~RegExpMacroAssemblerPPC() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -362,7 +360,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -652,6 +650,42 @@ void RegExpMacroAssemblerPPC::Fail() {
__ b(&exit_label_);
}
+void RegExpMacroAssemblerPPC::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, Operand(ref));
+ __ LoadU64(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerPPC::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(scratch, Operand(ref));
+ __ StoreU64(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerPPC::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ SubS64(scratch, stack_pointer, scratch);
+ __ StoreU64(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerPPC::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ LoadU64(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ AddS64(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
Label return_r3;
@@ -670,7 +704,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type
  // is MANUAL, no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
  // Ensure register assignments are consistent with callee save mask
DCHECK(r25.bit() & kRegExpCalleeSaved);
@@ -705,34 +739,48 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ push(r3); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(r3); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(r3); // The regexp stack base ptr.
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(r3, Operand(stack_limit));
- __ LoadU64(r3, MemOperand(r3));
- __ sub(r3, sp, r3, LeaveOE, SetRC);
- // Handle it if the stack pointer is already below the stack limit.
- __ ble(&stack_limit_hit, cr0);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ CmpU64(r3, Operand(num_registers_ * kSystemPointerSize), r0);
- __ bge(&stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ li(r3, Operand(EXCEPTION));
- __ b(&return_r3);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r3);
- __ cmpi(r3, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ bne(&return_r3);
+ // Initialize backtrack stack pointer. It must not be clobbered from here
+ // on. Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == r29);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), r4);
- __ bind(&stack_ok);
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(r3, Operand(stack_limit));
+ __ LoadU64(r3, MemOperand(r3));
+ __ sub(r3, sp, r3, LeaveOE, SetRC);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ ble(&stack_limit_hit, cr0);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ CmpU64(r3, Operand(num_registers_ * kSystemPointerSize), r0);
+ __ bge(&stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(r3, Operand(EXCEPTION));
+ __ b(&return_r3);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(r3);
+ __ cmpi(r3, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as
+ // result.
+ __ bne(&return_r3);
+
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ AddS64(sp, sp, Operand(-num_registers_ * kSystemPointerSize), r0);
@@ -759,18 +807,21 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmpi(r4, Operand::Zero());
- __ bne(&load_char_start_regexp);
- __ li(current_character(), Operand('\n'));
- __ b(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ cmpi(r4, Operand::Zero());
+ __ bne(&load_char_start_regexp);
+ __ li(current_character(), Operand('\n'));
+ __ b(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -792,10 +843,6 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ LoadU64(backtrack_stackpointer(),
- MemOperand(frame_pointer(), kStackHighEnd));
-
__ b(&start_label_);
// Exit code:
@@ -866,6 +913,10 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Prepare r3 to initialize registers with its value in the next run.
__ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r5);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// r25: capture start index
@@ -896,6 +947,10 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
__ bind(&return_r3);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r5);
+
  // Skip sp past regexp registers and local variables.
__ mr(sp, frame_pointer());
// Restore registers r25..r31 and return (restoring lr to pc).
@@ -916,12 +971,16 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r4);
+
CallCheckStackGuardState(r3);
__ cmpi(r3, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ bne(&return_r3);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ LoadU64(end_of_input_address(),
MemOperand(frame_pointer(), kInputEnd));
@@ -932,17 +991,18 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r3);
- __ mr(r3, backtrack_stackpointer());
- __ addi(r4, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r4);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, r3);
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ cmpi(r3, Operand::Zero());
__ beq(&exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1045,14 +1105,24 @@ void RegExpMacroAssemblerPPC::ReadCurrentPositionFromRegister(int reg) {
__ LoadU64(current_input_offset(), register_location(reg), r0);
}
+void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r4, Operand(ref));
+ __ LoadU64(r4, MemOperand(r4));
+ __ SubS64(r3, backtrack_stackpointer(), r4);
+ __ StoreU64(r3, register_location(reg));
+}
void RegExpMacroAssemblerPPC::ReadStackPointerFromRegister(int reg) {
- __ LoadU64(backtrack_stackpointer(), register_location(reg), r0);
- __ LoadU64(r3, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), r3);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r3, Operand(ref));
+ __ LoadU64(r3, MemOperand(r3));
+ __ LoadU64(backtrack_stackpointer(), register_location(reg));
+ __ AddS64(backtrack_stackpointer(), backtrack_stackpointer(), r3);
}
-
void RegExpMacroAssemblerPPC::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ CmpS64(current_input_offset(), Operand(-by * char_size()), r0);
@@ -1099,14 +1169,6 @@ void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
- __ LoadU64(r4, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r3, backtrack_stackpointer(), r4);
- __ StoreU64(r3, register_location(reg), r0);
-}
-
-
// Private methods:
void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 430c3a241c..212a1f4051 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
#define V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/ppc/assembler-ppc.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -91,20 +89,16 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Register 25..31.
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 7 * kSystemPointerSize;
static const int kCallerFrame = kReturnAddress + kSystemPointerSize;
- // Stack parameters placed by caller.
- static const int kIsolate =
- kCallerFrame + kStackFrameExtraParamSlot * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kSystemPointerSize;
- static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
@@ -116,8 +110,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -137,27 +137,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return r27; }
+ static constexpr Register current_input_offset() { return r27; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r28; }
+ static constexpr Register current_character() { return r28; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r30; }
+ static constexpr Register end_of_input_address() { return r30; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return r29; }
+ static constexpr Register backtrack_stackpointer() { return r29; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return r26; }
+ static constexpr Register code_pointer() { return r26; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -177,19 +177,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
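
The register accessors on both ports become static constexpr, which is what allows compile-time checks like the STATIC_ASSERT(backtrack_stackpointer() == r29) in the PPC GetCode hunk above. A minimal sketch with a stand-in Register type (not the real codegen types; requires C++17):

// Stand-in Register type; just enough structure for constant evaluation.
struct Register {
  int code;
  constexpr bool operator==(Register other) const { return code == other.code; }
};

inline constexpr Register r27{27};
inline constexpr Register r29{29};

class RegExpAssemblerSketch {
 public:
  // static constexpr: usable without an object and in constant expressions,
  // unlike the previous non-static `inline Register foo() { ... }` accessors.
  static constexpr Register current_input_offset() { return r27; }
  static constexpr Register backtrack_stackpointer() { return r29; }
};

static_assert(RegExpAssemblerSketch::backtrack_stackpointer() == r29,
              "callee-saved register assignment is fixed at compile time");

int main() { return 0; }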
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
index bfdd9df93c..dfdc2da476 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -23,29 +23,29 @@ void RegExpBytecodeGenerator::Emit(uint32_t byte, int32_t twenty_four_bits) {
}
void RegExpBytecodeGenerator::Emit16(uint32_t word) {
- DCHECK(pc_ <= buffer_.length());
- if (pc_ + 1 >= buffer_.length()) {
- Expand();
+ DCHECK(pc_ <= static_cast<int>(buffer_.size()));
+ if (pc_ + 1 >= static_cast<int>(buffer_.size())) {
+ ExpandBuffer();
}
- *reinterpret_cast<uint16_t*>(buffer_.begin() + pc_) = word;
+ *reinterpret_cast<uint16_t*>(buffer_.data() + pc_) = word;
pc_ += 2;
}
void RegExpBytecodeGenerator::Emit8(uint32_t word) {
- DCHECK(pc_ <= buffer_.length());
- if (pc_ == buffer_.length()) {
- Expand();
+ DCHECK(pc_ <= static_cast<int>(buffer_.size()));
+ if (pc_ == static_cast<int>(buffer_.size())) {
+ ExpandBuffer();
}
- *reinterpret_cast<unsigned char*>(buffer_.begin() + pc_) = word;
+ *reinterpret_cast<unsigned char*>(buffer_.data() + pc_) = word;
pc_ += 1;
}
void RegExpBytecodeGenerator::Emit32(uint32_t word) {
- DCHECK(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
+ DCHECK(pc_ <= static_cast<int>(buffer_.size()));
+ if (pc_ + 3 >= static_cast<int>(buffer_.size())) {
+ ExpandBuffer();
}
- *reinterpret_cast<uint32_t*>(buffer_.begin() + pc_) = word;
+ *reinterpret_cast<uint32_t*>(buffer_.data() + pc_) = word;
pc_ += 4;
}
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index c5ad2bfba5..c2b34fa653 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -16,7 +16,7 @@ namespace internal {
RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone)
: RegExpMacroAssembler(isolate, zone),
- buffer_(base::Vector<byte>::New(1024)),
+ buffer_(kInitialBufferSize, zone),
pc_(0),
advance_current_end_(kInvalidPC),
jump_edges_(zone),
@@ -24,7 +24,6 @@ RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone)
RegExpBytecodeGenerator::~RegExpBytecodeGenerator() {
if (backtrack_.is_linked()) backtrack_.Unuse();
- buffer_.Dispose();
}
RegExpBytecodeGenerator::IrregexpImplementation
@@ -39,8 +38,8 @@ void RegExpBytecodeGenerator::Bind(Label* l) {
int pos = l->pos();
while (pos != 0) {
int fixup = pos;
- pos = *reinterpret_cast<int32_t*>(buffer_.begin() + fixup);
- *reinterpret_cast<uint32_t*>(buffer_.begin() + fixup) = pc_;
+ pos = *reinterpret_cast<int32_t*>(buffer_.data() + fixup);
+ *reinterpret_cast<uint32_t*>(buffer_.data() + fixup) = pc_;
jump_edges_.emplace(fixup, pc_);
}
}
@@ -383,7 +382,7 @@ Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
Handle<ByteArray> array;
if (FLAG_regexp_peephole_optimization) {
array = RegExpBytecodePeepholeOptimization::OptimizeBytecode(
- isolate_, zone(), source, buffer_.begin(), length(), jump_edges_);
+ isolate_, zone(), source, buffer_.data(), length(), jump_edges_);
} else {
array = isolate_->factory()->NewByteArray(length());
Copy(array->GetDataStartAddress());
@@ -395,14 +394,13 @@ Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
int RegExpBytecodeGenerator::length() { return pc_; }
void RegExpBytecodeGenerator::Copy(byte* a) {
- MemCopy(a, buffer_.begin(), length());
+ MemCopy(a, buffer_.data(), length());
}
-void RegExpBytecodeGenerator::Expand() {
- base::Vector<byte> old_buffer = buffer_;
- buffer_ = base::Vector<byte>::New(old_buffer.length() * 2);
- MemCopy(buffer_.begin(), old_buffer.begin(), old_buffer.length());
- old_buffer.Dispose();
+void RegExpBytecodeGenerator::ExpandBuffer() {
+ // TODO(jgruber): The growth strategy could be smarter for large sizes.
+ // TODO(jgruber): It's not necessary to default-initialize new elements.
+ buffer_.resize(buffer_.size() * 2);
}
} // namespace internal
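
The bytecode generator now keeps its buffer in a ZoneVector sized kInitialBufferSize and doubles it in ExpandBuffer() whenever an emit would overflow, replacing the manually allocated base::Vector. A self-contained sketch of the same emit-and-grow pattern, using std::vector since the Zone allocator is V8-specific:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

class ByteEmitter {
 public:
  static constexpr size_t kInitialBufferSize = 1024;
  ByteEmitter() : buffer_(kInitialBufferSize) {}

  void Emit32(uint32_t word) {
    if (pc_ + sizeof(word) > buffer_.size()) ExpandBuffer();
    // memcpy instead of the reinterpret_cast store to stay alignment-safe.
    std::memcpy(buffer_.data() + pc_, &word, sizeof(word));
    pc_ += sizeof(word);
  }

  size_t length() const { return pc_; }

 private:
  // Doubling keeps the amortized cost of Emit* calls constant.
  void ExpandBuffer() { buffer_.resize(buffer_.size() * 2); }

  std::vector<uint8_t> buffer_;
  size_t pc_ = 0;
};

int main() {
  ByteEmitter emitter;
  for (uint32_t i = 0; i < 1000; ++i) emitter.Emit32(i);  // grows twice
  assert(emitter.length() == 4000);
  return 0;
}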
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 310ab32cec..551421ac7b 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -83,7 +83,8 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
Handle<HeapObject> GetCode(Handle<String> source) override;
private:
- void Expand();
+ void ExpandBuffer();
+
// Code and bitmap emission.
inline void EmitOrLink(Label* label);
inline void Emit32(uint32_t x);
@@ -96,7 +97,9 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
void Copy(byte* a);
// The buffer into which code and relocation info are generated.
- base::Vector<byte> buffer_;
+ static constexpr int kInitialBufferSize = 1024;
+ ZoneVector<byte> buffer_;
+
// The program counter.
int pc_;
Label backtrack_;
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index f9a959d258..be3bb45a5f 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -1060,12 +1060,12 @@ IrregexpInterpreter::Result IrregexpInterpreter::Match(
if (FLAG_regexp_tier_up) regexp.TierUpTick();
bool is_one_byte = String::IsOneByteRepresentationUnderneath(subject_string);
- ByteArray code_array = ByteArray::cast(regexp.Bytecode(is_one_byte));
- int total_register_count = regexp.MaxRegisterCount();
+ ByteArray code_array = ByteArray::cast(regexp.bytecode(is_one_byte));
+ int total_register_count = regexp.max_register_count();
return MatchInternal(isolate, code_array, subject_string, output_registers,
output_register_count, total_register_count,
- start_position, call_origin, regexp.BacktrackLimit());
+ start_position, call_origin, regexp.backtrack_limit());
}
IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
@@ -1111,7 +1111,7 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
// builtin.
IrregexpInterpreter::Result IrregexpInterpreter::MatchForCallFromJs(
Address subject, int32_t start_position, Address, Address,
- int* output_registers, int32_t output_register_count, Address,
+ int* output_registers, int32_t output_register_count,
RegExp::CallOrigin call_origin, Isolate* isolate, Address regexp) {
DCHECK_NOT_NULL(isolate);
DCHECK_NOT_NULL(output_registers);
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index a4d79184b0..e9dedd781b 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -36,9 +36,8 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
// RETRY is returned if a retry through the runtime is needed (e.g. when
// interrupts have been scheduled or the regexp is marked for tier-up).
//
- // Arguments input_start, input_end and backtrack_stack are
- // unused. They are only passed to match the signature of the native irregex
- // code.
+ // Arguments input_start and input_end are unused. They are only passed to
+ // match the signature of the native irregex code.
//
// Arguments output_registers and output_register_count describe the results
// array, which will contain register values of all captures if SUCCESS is
@@ -47,7 +46,6 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
Address input_start, Address input_end,
int* output_registers,
int32_t output_register_count,
- Address backtrack_stack,
RegExp::CallOrigin call_origin,
Isolate* isolate, Address regexp);
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 891079b357..0cd103da10 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -306,23 +306,21 @@ int NativeRegExpMacroAssembler::Execute(
String input, // This needs to be the unpacked (sliced, cons) string.
int start_offset, const byte* input_start, const byte* input_end,
int* output, int output_size, Isolate* isolate, JSRegExp regexp) {
- // Ensure that the minimum stack has been allocated.
RegExpStackScope stack_scope(isolate);
- Address stack_base = stack_scope.stack()->stack_base();
bool is_one_byte = String::IsOneByteRepresentationUnderneath(input);
- Code code = FromCodeT(CodeT::cast(regexp.Code(is_one_byte)));
+ Code code = FromCodeT(CodeT::cast(regexp.code(is_one_byte)));
RegExp::CallOrigin call_origin = RegExp::CallOrigin::kFromRuntime;
- using RegexpMatcherSig = int(
- Address input_string, int start_offset, const byte* input_start,
- const byte* input_end, int* output, int output_size, Address stack_base,
- int call_origin, Isolate* isolate, Address regexp);
+ using RegexpMatcherSig =
+ // NOLINTNEXTLINE(readability/casting)
+ int(Address input_string, int start_offset, const byte* input_start,
+ const byte* input_end, int* output, int output_size, int call_origin,
+ Isolate* isolate, Address regexp);
auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
- int result =
- fn.Call(input.ptr(), start_offset, input_start, input_end, output,
- output_size, stack_base, call_origin, isolate, regexp.ptr());
+ int result = fn.Call(input.ptr(), start_offset, input_start, input_end,
+ output, output_size, call_origin, isolate, regexp.ptr());
DCHECK_GE(result, SMALLEST_REGEXP_RESULT);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
@@ -382,22 +380,23 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
};
// clang-format on
-Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
- Address* stack_base,
- Isolate* isolate) {
+Address NativeRegExpMacroAssembler::GrowStack(Isolate* isolate) {
+ DisallowGarbageCollection no_gc;
+
RegExpStack* regexp_stack = isolate->regexp_stack();
- size_t size = regexp_stack->stack_capacity();
- Address old_stack_base = regexp_stack->stack_base();
- DCHECK(old_stack_base == *stack_base);
- DCHECK(stack_pointer <= old_stack_base);
- DCHECK(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
- if (new_stack_base == kNullAddress) {
- return kNullAddress;
- }
- *stack_base = new_stack_base;
- intptr_t stack_content_size = old_stack_base - stack_pointer;
- return new_stack_base - stack_content_size;
+ const size_t old_size = regexp_stack->memory_size();
+
+#ifdef DEBUG
+ const Address old_stack_top = regexp_stack->memory_top();
+ const Address old_stack_pointer = regexp_stack->stack_pointer();
+ CHECK_LE(old_stack_pointer, old_stack_top);
+ CHECK_LE(static_cast<size_t>(old_stack_top - old_stack_pointer), old_size);
+#endif // DEBUG
+
+ Address new_stack_base = regexp_stack->EnsureCapacity(old_size * 2);
+ if (new_stack_base == kNullAddress) return kNullAddress;
+
+ return regexp_stack->stack_pointer();
}
} // namespace internal
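
With the stack base pointer removed from the frame, both GrowStack and the generated-code entry point lose an argument, and Execute() updates the RegexpMatcherSig alias accordingly. The alias-plus-call-through-pointer shape can be sketched in plain C++ as below; FakeMatch merely stands in for the code object that GeneratedCode<RegexpMatcherSig>::FromCode wraps in V8, and the constants are illustrative only:

#include <cassert>
#include <cstdint>

using Address = uintptr_t;
using byte = uint8_t;

// Caller and generated code must agree on this exact signature; dropping the
// old stack_base parameter therefore has to happen on both sides at once.
using RegexpMatcherSig = int(Address input_string, int start_offset,
                             const byte* input_start, const byte* input_end,
                             int* output, int output_size, int call_origin);

// Stand-in for the generated irregexp code: fills the output registers and
// reports one successful match.
int FakeMatch(Address, int, const byte*, const byte*, int* output,
              int output_size, int) {
  for (int i = 0; i < output_size; ++i) output[i] = -1;
  return 1;
}

int main() {
  RegexpMatcherSig* fn = FakeMatch;
  int output[4];
  int result = fn(/*input_string=*/0, /*start_offset=*/0, nullptr, nullptr,
                  output, 4, /*call_origin=*/0);
  assert(result == 1 && output[0] == -1);
  return 0;
}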
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 9bd9ba615e..af3cc2f5ca 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -281,13 +281,11 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int* offsets_vector, int offsets_vector_length,
int previous_index, Isolate* isolate);
- // Called from RegExp if the backtrack stack limit is hit.
- // Tries to expand the stack. Returns the new stack-pointer if
- // successful, and updates the stack_top address, or returns 0 if unable
- // to grow the stack.
+ // Called from RegExp if the backtrack stack limit is hit. Tries to expand
+ // the stack. Returns the new stack-pointer if successful, or returns 0 if
+ // unable to grow the stack.
// This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top,
- Isolate* isolate);
+ static Address GrowStack(Isolate* isolate);
static int CheckStackGuardState(Isolate* isolate, int start_index,
RegExp::CallOrigin call_origin,
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 1d9f24b792..4b0e554764 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -23,6 +23,13 @@ namespace internal {
namespace {
+// Whether we're currently inside the ClassEscape production
+// (tc39.es/ecma262/#prod-annexB-CharacterEscape).
+enum class InClassEscapeState {
+ kInClass,
+ kNotInClass,
+};
+
// A BufferedZoneList is an automatically growing list, just like (and backed
// by) a ZoneList, that is optimized for the case of adding and removing
// a single element. The last element added is stored outside the backing list,
@@ -255,10 +262,6 @@ class RegExpParserImpl final {
// out parameters.
bool ParseIntervalQuantifier(int* min_out, int* max_out);
- // Parses and returns a single escaped character. The character
- // must not be 'b' or 'B' since they are usually handle specially.
- base::uc32 ParseClassCharacterEscape();
-
// Checks whether the following is a length-digit hexadecimal number,
// and sets the value if it is.
bool ParseHexEscape(int length, base::uc32* value);
@@ -271,7 +274,6 @@ class RegExpParserImpl final {
const ZoneVector<char>& name_1,
const ZoneVector<char>& name_2);
- RegExpTree* GetPropertySequence(const ZoneVector<char>& name_1);
RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
base::uc32 ParseOctalLiteral();
@@ -287,8 +289,15 @@ class RegExpParserImpl final {
void ParseClassEscape(ZoneList<CharacterRange>* ranges, Zone* zone,
bool add_unicode_case_equivalents, base::uc32* char_out,
bool* is_class_escape);
-
- char ParseClassEscape();
+ // Returns true iff parsing was successful.
+ bool TryParseCharacterClassEscape(base::uc32 next,
+ InClassEscapeState in_class_escape_state,
+ ZoneList<CharacterRange>* ranges,
+ Zone* zone,
+ bool add_unicode_case_equivalents);
+ // Parses and returns a single escaped character.
+ base::uc32 ParseCharacterEscape(InClassEscapeState in_class_escape_state,
+ bool* is_escaped_unicode_character);
RegExpTree* ReportError(RegExpError error);
void Advance();
@@ -335,7 +344,7 @@ class RegExpParserImpl final {
// Returns true iff the pattern contains named captures. May call
// ScanForCaptures to look ahead at the remaining pattern.
- bool HasNamedCaptures();
+ bool HasNamedCaptures(InClassEscapeState in_class_escape_state);
Zone* zone() const { return zone_; }
@@ -350,7 +359,7 @@ class RegExpParserImpl final {
return input_[index];
}
int input_length() const { return input_length_; }
- void ScanForCaptures();
+ void ScanForCaptures(InClassEscapeState in_class_escape_state);
struct RegExpCaptureNameLess {
bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
@@ -576,6 +585,7 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
while (true) {
switch (current()) {
case kEndMarker:
+ if (failed()) return nullptr; // E.g. the initial Advance failed.
if (state->IsSubexpression()) {
// Inside a parenthesized group when hitting end of input.
return ReportError(RegExpError::kUnterminatedGroup);
@@ -688,69 +698,19 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
switch (Next()) {
case kEndMarker:
return ReportError(RegExpError::kEscapeAtEndOfPattern);
- case 'b':
- Advance(2);
- builder->AddAssertion(zone()->template New<RegExpAssertion>(
- RegExpAssertion::BOUNDARY));
- continue;
- case 'B':
- Advance(2);
- builder->AddAssertion(zone()->template New<RegExpAssertion>(
- RegExpAssertion::NON_BOUNDARY));
- continue;
// AtomEscape ::
- // CharacterClassEscape
+ // [+UnicodeMode] DecimalEscape
+ // [~UnicodeMode] DecimalEscape but only if the CapturingGroupNumber
+ // of DecimalEscape is ≤ NcapturingParens
+ // CharacterEscape (some cases of this mixed in too)
//
- // CharacterClassEscape :: one of
- // d D s S w W
- case 'd':
- case 'D':
- case 's':
- case 'S':
- case 'w':
- case 'W': {
- base::uc32 c = Next();
- Advance(2);
- ZoneList<CharacterRange>* ranges =
- zone()->template New<ZoneList<CharacterRange>>(2, zone());
- CharacterRange::AddClassEscape(
- c, ranges, unicode() && builder->ignore_case(), zone());
- RegExpCharacterClass* cc =
- zone()->template New<RegExpCharacterClass>(zone(), ranges);
- builder->AddCharacterClass(cc);
- break;
- }
- case 'p':
- case 'P': {
- base::uc32 p = Next();
- Advance(2);
- if (unicode()) {
- ZoneList<CharacterRange>* ranges =
- zone()->template New<ZoneList<CharacterRange>>(2, zone());
- ZoneVector<char> name_1(zone());
- ZoneVector<char> name_2(zone());
- if (ParsePropertyClassName(&name_1, &name_2)) {
- if (AddPropertyClassRange(ranges, p == 'P', name_1, name_2)) {
- RegExpCharacterClass* cc =
- zone()->template New<RegExpCharacterClass>(zone(),
- ranges);
- builder->AddCharacterClass(cc);
- break;
- }
- if (p == 'p' && name_2.empty()) {
- RegExpTree* sequence = GetPropertySequence(name_1);
- if (sequence != nullptr) {
- builder->AddAtom(sequence);
- break;
- }
- }
- }
- return ReportError(RegExpError::kInvalidPropertyName);
- } else {
- builder->AddCharacter(p);
- }
- break;
- }
+ // TODO(jgruber): It may make sense to disentangle all the different
+ // cases and make the structure mirror the spec, e.g. for AtomEscape:
+ //
+ // if (TryParseDecimalEscape(...)) return;
+ // if (TryParseCharacterClassEscape(...)) return;
+ // if (TryParseCharacterEscape(...)) return;
+ // if (TryParseGroupName(...)) return;
case '1':
case '2':
case '3':
@@ -761,7 +721,8 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
case '8':
case '9': {
int index = 0;
- bool is_backref = ParseBackReferenceIndex(&index CHECK_FAILED);
+ const bool is_backref =
+ ParseBackReferenceIndex(&index CHECK_FAILED);
if (is_backref) {
if (state->IsInsideCaptureGroup(index)) {
// The back reference is inside the capture group it refers to.
@@ -801,99 +762,77 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
builder->AddCharacter(octal);
break;
}
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance(2);
- builder->AddCharacter('\f');
- break;
- case 'n':
- Advance(2);
- builder->AddCharacter('\n');
- break;
- case 'r':
- Advance(2);
- builder->AddCharacter('\r');
- break;
- case 't':
+ case 'b':
Advance(2);
- builder->AddCharacter('\t');
- break;
- case 'v':
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::BOUNDARY));
+ continue;
+ case 'B':
Advance(2);
- builder->AddCharacter('\v');
- break;
- case 'c': {
- Advance();
- base::uc32 controlLetter = Next();
- // Special case if it is an ASCII letter.
- // Convert lower case letters to uppercase.
- base::uc32 letter = controlLetter & ~('a' ^ 'A');
- if (letter < 'A' || 'Z' < letter) {
- // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
- // Read the backslash as a literal character instead of as
- // starting an escape.
- // ES#prod-annexB-ExtendedPatternCharacter
- if (unicode()) {
- // With /u, invalid escapes are not treated as identity escapes.
- return ReportError(RegExpError::kInvalidUnicodeEscape);
- }
- builder->AddCharacter('\\');
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::NON_BOUNDARY));
+ continue;
+ // AtomEscape ::
+ // CharacterClassEscape
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ case 'p':
+ case 'P': {
+ base::uc32 next = Next();
+ ZoneList<CharacterRange>* ranges =
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
+ bool add_unicode_case_equivalents =
+ unicode() && builder->ignore_case();
+ bool parsed_character_class_escape = TryParseCharacterClassEscape(
+ next, InClassEscapeState::kNotInClass, ranges, zone(),
+ add_unicode_case_equivalents CHECK_FAILED);
+
+ if (parsed_character_class_escape) {
+ RegExpCharacterClass* cc =
+ zone()->template New<RegExpCharacterClass>(zone(), ranges);
+ builder->AddCharacterClass(cc);
} else {
+ CHECK(!unicode());
Advance(2);
- builder->AddCharacter(controlLetter & 0x1F);
+ builder->AddCharacter(next); // IdentityEscape.
}
break;
}
- case 'x': {
- Advance(2);
- base::uc32 value;
- if (ParseHexEscape(2, &value)) {
- builder->AddCharacter(value);
- } else if (!unicode()) {
- builder->AddCharacter('x');
- } else {
- // With /u, invalid escapes are not treated as identity escapes.
- return ReportError(RegExpError::kInvalidEscape);
- }
- break;
- }
- case 'u': {
- Advance(2);
- base::uc32 value;
- if (ParseUnicodeEscape(&value)) {
- builder->AddEscapedUnicodeCharacter(value);
- } else if (!unicode()) {
- builder->AddCharacter('u');
- } else {
- // With /u, invalid escapes are not treated as identity escapes.
- return ReportError(RegExpError::kInvalidUnicodeEscape);
- }
- break;
- }
- case 'k':
+ // AtomEscape ::
+ // k GroupName
+ case 'k': {
// Either an identity escape or a named back-reference. The two
// interpretations are mutually exclusive: '\k' is interpreted as
// an identity escape for non-Unicode patterns without named
// capture groups, and as the beginning of a named back-reference
// in all other cases.
- if (unicode() || HasNamedCaptures()) {
+ const bool has_named_captures =
+ HasNamedCaptures(InClassEscapeState::kNotInClass CHECK_FAILED);
+ if (unicode() || has_named_captures) {
Advance(2);
ParseNamedBackReference(builder, state CHECK_FAILED);
break;
}
+ }
V8_FALLTHROUGH;
- default:
- Advance();
- // With /u, no identity escapes except for syntax characters
- // are allowed. Otherwise, all identity escapes are allowed.
- if (!unicode() || IsSyntaxCharacterOrSlash(current())) {
- builder->AddCharacter(current());
- Advance();
+ // AtomEscape ::
+ // CharacterEscape
+ default: {
+ bool is_escaped_unicode_character = false;
+ base::uc32 c = ParseCharacterEscape(
+ InClassEscapeState::kNotInClass,
+ &is_escaped_unicode_character CHECK_FAILED);
+ if (is_escaped_unicode_character) {
+ builder->AddEscapedUnicodeCharacter(c);
} else {
- return ReportError(RegExpError::kInvalidEscape);
+ builder->AddCharacter(c);
}
break;
+ }
}
break;
case '{': {
@@ -1052,12 +991,28 @@ static bool IsSpecialClassEscape(base::uc32 c) {
// is called when needed. It can see the difference between capturing and
// noncapturing parentheses and can skip character classes and backslash-escaped
// characters.
+//
+// Important: The scanner has to be in a consistent state when calling
+// ScanForCaptures, e.g. not in the middle of an escape sequence '\['.
template <class CharT>
-void RegExpParserImpl<CharT>::ScanForCaptures() {
+void RegExpParserImpl<CharT>::ScanForCaptures(
+ InClassEscapeState in_class_escape_state) {
DCHECK(!is_scanned_for_captures_);
const int saved_position = position();
// Start with captures started previous to current position
int capture_count = captures_started();
+ // When we start inside a character class, skip everything inside the class.
+ if (in_class_escape_state == InClassEscapeState::kInClass) {
+ int c;
+ while ((c = current()) != kEndMarker) {
+ Advance();
+ if (c == '\\') {
+ Advance();
+ } else {
+ if (c == ']') break;
+ }
+ }
+ }
// Add count of captures after this position.
int n;
while ((n = current()) != kEndMarker) {
@@ -1130,7 +1085,8 @@ bool RegExpParserImpl<CharT>::ParseBackReferenceIndex(int* index_out) {
}
}
if (value > captures_started()) {
- if (!is_scanned_for_captures_) ScanForCaptures();
+ if (!is_scanned_for_captures_)
+ ScanForCaptures(InClassEscapeState::kNotInClass);
if (value > capture_count_) {
Reset(start);
return false;
@@ -1303,14 +1259,14 @@ template <class CharT>
RegExpCapture* RegExpParserImpl<CharT>::GetCapture(int index) {
// The index for the capture groups are one-based. Its index in the list is
// zero-based.
- int know_captures =
+ const int known_captures =
is_scanned_for_captures_ ? capture_count_ : captures_started_;
- DCHECK(index <= know_captures);
+ DCHECK(index <= known_captures);
if (captures_ == nullptr) {
captures_ =
- zone()->template New<ZoneList<RegExpCapture*>>(know_captures, zone());
+ zone()->template New<ZoneList<RegExpCapture*>>(known_captures, zone());
}
- while (captures_->length() < know_captures) {
+ while (captures_->length() < known_captures) {
captures_->Add(zone()->template New<RegExpCapture>(captures_->length() + 1),
zone());
}
@@ -1328,12 +1284,13 @@ ZoneVector<RegExpCapture*>* RegExpParserImpl<CharT>::GetNamedCaptures() const {
}
template <class CharT>
-bool RegExpParserImpl<CharT>::HasNamedCaptures() {
+bool RegExpParserImpl<CharT>::HasNamedCaptures(
+ InClassEscapeState in_class_escape_state) {
if (has_named_captures_ || is_scanned_for_captures_) {
return has_named_captures_;
}
- ScanForCaptures();
+ ScanForCaptures(in_class_escape_state);
DCHECK(is_scanned_for_captures_);
return has_named_captures_;
}
@@ -1739,72 +1696,6 @@ bool RegExpParserImpl<CharT>::AddPropertyClassRange(
}
}
-template <class CharT>
-RegExpTree* RegExpParserImpl<CharT>::GetPropertySequence(
- const ZoneVector<char>& name_1) {
- if (!FLAG_harmony_regexp_sequence) return nullptr;
- const char* name = name_1.data();
- const base::uc32* sequence_list = nullptr;
- RegExpFlags flags = RegExpFlag::kUnicode;
- if (NameEquals(name, "Emoji_Flag_Sequence")) {
- sequence_list = UnicodePropertySequences::kEmojiFlagSequences;
- } else if (NameEquals(name, "Emoji_Tag_Sequence")) {
- sequence_list = UnicodePropertySequences::kEmojiTagSequences;
- } else if (NameEquals(name, "Emoji_ZWJ_Sequence")) {
- sequence_list = UnicodePropertySequences::kEmojiZWJSequences;
- }
- if (sequence_list != nullptr) {
- // TODO(yangguo): this creates huge regexp code. Alternative to this is
- // to create a new operator that checks for these sequences at runtime.
- RegExpBuilder builder(zone(), flags);
- while (true) { // Iterate through list of sequences.
- while (*sequence_list != 0) { // Iterate through sequence.
- builder.AddUnicodeCharacter(*sequence_list);
- sequence_list++;
- }
- sequence_list++;
- if (*sequence_list == 0) break;
- builder.NewAlternative();
- }
- return builder.ToRegExp();
- }
-
- if (NameEquals(name, "Emoji_Keycap_Sequence")) {
- // https://unicode.org/reports/tr51/#def_emoji_keycap_sequence
- // emoji_keycap_sequence := [0-9#*] \x{FE0F 20E3}
- RegExpBuilder builder(zone(), flags);
- ZoneList<CharacterRange>* prefix_ranges =
- zone()->template New<ZoneList<CharacterRange>>(2, zone());
- prefix_ranges->Add(CharacterRange::Range('0', '9'), zone());
- prefix_ranges->Add(CharacterRange::Singleton('#'), zone());
- prefix_ranges->Add(CharacterRange::Singleton('*'), zone());
- builder.AddCharacterClass(
- zone()->template New<RegExpCharacterClass>(zone(), prefix_ranges));
- builder.AddCharacter(0xFE0F);
- builder.AddCharacter(0x20E3);
- return builder.ToRegExp();
- } else if (NameEquals(name, "Emoji_Modifier_Sequence")) {
- // https://unicode.org/reports/tr51/#def_emoji_modifier_sequence
- // emoji_modifier_sequence := emoji_modifier_base emoji_modifier
- RegExpBuilder builder(zone(), flags);
- ZoneList<CharacterRange>* modifier_base_ranges =
- zone()->template New<ZoneList<CharacterRange>>(2, zone());
- LookupPropertyValueName(UCHAR_EMOJI_MODIFIER_BASE, "Y", false,
- modifier_base_ranges, zone());
- builder.AddCharacterClass(zone()->template New<RegExpCharacterClass>(
- zone(), modifier_base_ranges));
- ZoneList<CharacterRange>* modifier_ranges =
- zone()->template New<ZoneList<CharacterRange>>(2, zone());
- LookupPropertyValueName(UCHAR_EMOJI_MODIFIER, "Y", false, modifier_ranges,
- zone());
- builder.AddCharacterClass(
- zone()->template New<RegExpCharacterClass>(zone(), modifier_ranges));
- return builder.ToRegExp();
- }
-
- return nullptr;
-}
-
#else // V8_INTL_SUPPORT
template <class CharT>
@@ -1820,12 +1711,6 @@ bool RegExpParserImpl<CharT>::AddPropertyClassRange(
return false;
}
-template <class CharT>
-RegExpTree* RegExpParserImpl<CharT>::GetPropertySequence(
- const ZoneVector<char>& name) {
- return nullptr;
-}
-
#endif // V8_INTL_SUPPORT
template <class CharT>
@@ -1848,17 +1733,21 @@ bool RegExpParserImpl<CharT>::ParseUnlimitedLengthHexNumber(int max_value,
return true;
}
+// https://tc39.es/ecma262/#prod-CharacterEscape
template <class CharT>
-base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
+base::uc32 RegExpParserImpl<CharT>::ParseCharacterEscape(
+ InClassEscapeState in_class_escape_state,
+ bool* is_escaped_unicode_character) {
DCHECK_EQ('\\', current());
DCHECK(has_next() && !IsSpecialClassEscape(Next()));
+
Advance();
- switch (current()) {
- case 'b':
- Advance();
- return '\b';
- // ControlEscape :: one of
- // f n r t v
+
+ const base::uc32 c = current();
+ switch (c) {
+ // CharacterEscape ::
+ // ControlEscape :: one of
+ // f n r t v
case 'f':
Advance();
return '\f';
@@ -1874,12 +1763,11 @@ base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
case 'v':
Advance();
return '\v';
+ // CharacterEscape ::
+ // c ControlLetter
case 'c': {
base::uc32 controlLetter = Next();
base::uc32 letter = controlLetter & ~('A' ^ 'a');
- // Inside a character class, we also accept digits and underscore as
- // control characters, unless with /u. See Annex B:
- // ES#prod-annexB-ClassControlLetter
if (letter >= 'A' && letter <= 'Z') {
Advance(2);
// Control letters mapped to ASCII control characters in the range
@@ -1888,22 +1776,29 @@ base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
}
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
- ReportError(RegExpError::kInvalidClassEscape);
+ ReportError(RegExpError::kInvalidUnicodeEscape);
return 0;
}
- if ((controlLetter >= '0' && controlLetter <= '9') ||
- controlLetter == '_') {
- Advance(2);
- return controlLetter & 0x1F;
+ if (in_class_escape_state == InClassEscapeState::kInClass) {
+ // Inside a character class, we also accept digits and underscore as
+ // control characters, unless with /u. See Annex B:
+ // ES#prod-annexB-ClassControlLetter
+ if ((controlLetter >= '0' && controlLetter <= '9') ||
+ controlLetter == '_') {
+ Advance(2);
+ return controlLetter & 0x1F;
+ }
}
// We match JSC in reading the backslash as a literal
// character instead of as starting an escape.
- // TODO(v8:6201): Not yet covered by the spec.
return '\\';
}
+ // CharacterEscape ::
+ // 0 [lookahead ∉ DecimalDigit]
+ // [~UnicodeMode] LegacyOctalEscapeSequence
case '0':
- // With /u, \0 is interpreted as NUL if not followed by another digit.
- if (unicode() && !(Next() >= '0' && Next() <= '9')) {
+ // \0 is interpreted as NUL if not followed by another digit.
+ if (Next() < '0' || Next() > '9') {
Advance();
return 0;
}
@@ -1925,6 +1820,8 @@ base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
return 0;
}
return ParseOctalLiteral();
+ // CharacterEscape ::
+ // HexEscapeSequence
case 'x': {
Advance();
base::uc32 value;
@@ -1938,10 +1835,15 @@ base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
// as an identity escape.
return 'x';
}
+ // CharacterEscape ::
+ // RegExpUnicodeEscapeSequence [?UnicodeMode]
case 'u': {
Advance();
base::uc32 value;
- if (ParseUnicodeEscape(&value)) return value;
+ if (ParseUnicodeEscape(&value)) {
+ *is_escaped_unicode_character = true;
+ return value;
+ }
if (unicode()) {
// With /u, invalid escapes are not treated as identity escapes.
ReportError(RegExpError::kInvalidUnicodeEscape);
@@ -1951,68 +1853,124 @@ base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
// as an identity escape.
return 'u';
}
- default: {
- base::uc32 result = current();
- // With /u, no identity escapes except for syntax characters and '-' are
- // allowed. Otherwise, all identity escapes are allowed.
- if (!unicode() || IsSyntaxCharacterOrSlash(result) || result == '-') {
- Advance();
- return result;
- }
+ default:
+ break;
+ }
+
+ // CharacterEscape ::
+ // IdentityEscape[?UnicodeMode, ?N]
+ //
+ // * With /u, no identity escapes except for syntax characters are
+ // allowed.
+ // * Without /u:
+ // * '\c' is not an IdentityEscape.
+ // * '\k' is not an IdentityEscape when named captures exist.
+ // * Otherwise, all identity escapes are allowed.
+ if (unicode()) {
+ if (!IsSyntaxCharacterOrSlash(c)) {
ReportError(RegExpError::kInvalidEscape);
return 0;
}
+ Advance();
+ return c;
+ }
+ DCHECK(!unicode());
+ if (c == 'c') {
+ ReportError(RegExpError::kInvalidEscape);
+ return 0;
+ }
+ Advance();
+ // Note: It's important to Advance before the HasNamedCaptures call s.t. we
+ // don't start scanning in the middle of an escape.
+ if (c == 'k' && HasNamedCaptures(in_class_escape_state)) {
+ ReportError(RegExpError::kInvalidEscape);
+ return 0;
}
- UNREACHABLE();
+ return c;
}
+// https://tc39.es/ecma262/#prod-ClassEscape
template <class CharT>
void RegExpParserImpl<CharT>::ParseClassEscape(
ZoneList<CharacterRange>* ranges, Zone* zone,
bool add_unicode_case_equivalents, base::uc32* char_out,
bool* is_class_escape) {
- base::uc32 current_char = current();
- if (current_char == '\\') {
- switch (Next()) {
- case 'w':
- case 'W':
- case 'd':
- case 'D':
- case 's':
- case 'S': {
- CharacterRange::AddClassEscape(static_cast<char>(Next()), ranges,
- add_unicode_case_equivalents, zone);
+ *is_class_escape = false;
+
+ if (current() != '\\') {
+ // Not a ClassEscape.
+ *char_out = current();
+ Advance();
+ return;
+ }
+
+ const base::uc32 next = Next();
+ switch (next) {
+ case 'b':
+ *char_out = '\b';
+ Advance(2);
+ return;
+ case '-':
+ if (unicode()) {
+ *char_out = next;
Advance(2);
- *is_class_escape = true;
return;
}
- case kEndMarker:
- ReportError(RegExpError::kEscapeAtEndOfPattern);
- return;
- case 'p':
- case 'P':
- if (unicode()) {
- bool negate = Next() == 'P';
- Advance(2);
- ZoneVector<char> name_1(zone);
- ZoneVector<char> name_2(zone);
- if (!ParsePropertyClassName(&name_1, &name_2) ||
- !AddPropertyClassRange(ranges, negate, name_1, name_2)) {
- ReportError(RegExpError::kInvalidClassPropertyName);
- }
- *is_class_escape = true;
- return;
- }
- break;
- default:
- break;
+ break;
+ case kEndMarker:
+ ReportError(RegExpError::kEscapeAtEndOfPattern);
+ return;
+ default:
+ break;
+ }
+
+ static constexpr InClassEscapeState kInClassEscape =
+ InClassEscapeState::kInClass;
+ *is_class_escape = TryParseCharacterClassEscape(
+ next, kInClassEscape, ranges, zone, add_unicode_case_equivalents);
+ if (*is_class_escape) return;
+
+ bool dummy = false; // Unused.
+ *char_out = ParseCharacterEscape(kInClassEscape, &dummy);
+}
+
+// https://tc39.es/ecma262/#prod-CharacterClassEscape
+template <class CharT>
+bool RegExpParserImpl<CharT>::TryParseCharacterClassEscape(
+ base::uc32 next, InClassEscapeState in_class_escape_state,
+ ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents) {
+ DCHECK_EQ(current(), '\\');
+ DCHECK_EQ(Next(), next);
+
+ switch (next) {
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ CharacterRange::AddClassEscape(static_cast<char>(next), ranges,
+ add_unicode_case_equivalents, zone);
+ Advance(2);
+ return true;
+ case 'p':
+ case 'P': {
+ if (!unicode()) return false;
+ bool negate = next == 'P';
+ Advance(2);
+ ZoneVector<char> name_1(zone);
+ ZoneVector<char> name_2(zone);
+ if (!ParsePropertyClassName(&name_1, &name_2) ||
+ !AddPropertyClassRange(ranges, negate, name_1, name_2)) {
+ ReportError(in_class_escape_state == InClassEscapeState::kInClass
+ ? RegExpError::kInvalidClassPropertyName
+ : RegExpError::kInvalidPropertyName);
+ }
+ return true;
}
- *char_out = ParseClassCharacterEscape();
- *is_class_escape = false;
- } else {
- Advance();
- *char_out = current_char;
- *is_class_escape = false;
+ default:
+ return false;
}
}
@@ -2081,29 +2039,32 @@ RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
template <class CharT>
bool RegExpParserImpl<CharT>::Parse(RegExpCompileData* result) {
- DCHECK(result != nullptr);
+ DCHECK_NOT_NULL(result);
RegExpTree* tree = ParsePattern();
+
if (failed()) {
- DCHECK(tree == nullptr);
- DCHECK(error_ != RegExpError::kNone);
+ DCHECK_NULL(tree);
+ DCHECK_NE(error_, RegExpError::kNone);
result->error = error_;
result->error_pos = error_pos_;
- } else {
- DCHECK(tree != nullptr);
- DCHECK(error_ == RegExpError::kNone);
- if (FLAG_trace_regexp_parser) {
- StdoutStream os;
- tree->Print(os, zone());
- os << "\n";
- }
- result->tree = tree;
- int capture_count = captures_started();
- result->simple = tree->IsAtom() && simple() && capture_count == 0;
- result->contains_anchor = contains_anchor();
- result->capture_count = capture_count;
- result->named_captures = GetNamedCaptures();
- }
- return !failed();
+ return false;
+ }
+
+ DCHECK_NOT_NULL(tree);
+ DCHECK_EQ(error_, RegExpError::kNone);
+ if (FLAG_trace_regexp_parser) {
+ StdoutStream os;
+ tree->Print(os, zone());
+ os << "\n";
+ }
+
+ result->tree = tree;
+ const int capture_count = captures_started();
+ result->simple = tree->IsAtom() && simple() && capture_count == 0;
+ result->contains_anchor = contains_anchor();
+ result->capture_count = capture_count;
+ result->named_captures = GetNamedCaptures();
+ return true;
}
RegExpBuilder::RegExpBuilder(Zone* zone, RegExpFlags flags)
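Aside (not part of the patch): the ControlLetter hunk above keeps the Annex B rule that, inside a character class and without /u, digits and '_' are also accepted after \c. A minimal standalone sketch of that mapping, assuming only what the hunk shows; the helper name and signature are illustrative, not V8 API:

#include <cstdint>
#include <optional>

// Illustrative helper (not V8 code): maps the character after "\c" to a
// control character. Letters always map to 0x01..0x1A; digits and '_' are
// accepted only inside a character class and only without /u, mirroring the
// Annex B ClassControlLetter behaviour referenced in the patch.
std::optional<uint32_t> ControlEscapeValue(uint32_t control_letter,
                                           bool unicode, bool in_class) {
  uint32_t letter = control_letter & ~('A' ^ 'a');  // fold case
  if (letter >= 'A' && letter <= 'Z') return letter & 0x1F;
  if (!unicode && in_class &&
      ((control_letter >= '0' && control_letter <= '9') ||
       control_letter == '_')) {
    return control_letter & 0x1F;
  }
  return std::nullopt;  // caller reports an error (/u) or falls back to '\\'
}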
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 6d73b7c03d..9c403eed08 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -11,23 +11,17 @@ namespace v8 {
namespace internal {
RegExpStackScope::RegExpStackScope(Isolate* isolate)
- : regexp_stack_(isolate->regexp_stack()) {
+ : regexp_stack_(isolate->regexp_stack()),
+ old_sp_top_delta_(regexp_stack_->sp_top_delta()) {
DCHECK(regexp_stack_->IsValid());
- // Irregexp is not reentrant in several ways; in particular, the
- // RegExpStackScope is not reentrant since the destructor frees allocated
- // memory. Protect against reentrancy here.
- CHECK(!regexp_stack_->is_in_use());
- regexp_stack_->set_is_in_use(true);
}
-
RegExpStackScope::~RegExpStackScope() {
- // Reset the buffer if it has grown.
- regexp_stack_->Reset();
- DCHECK(!regexp_stack_->is_in_use());
+ CHECK_EQ(old_sp_top_delta_, regexp_stack_->sp_top_delta());
+ regexp_stack_->ResetIfEmpty();
}
-RegExpStack::RegExpStack() : thread_local_(this), isolate_(nullptr) {}
+RegExpStack::RegExpStack() : thread_local_(this) {}
RegExpStack::~RegExpStack() { thread_local_.FreeAndInvalidate(); }
@@ -52,18 +46,16 @@ char* RegExpStack::RestoreStack(char* from) {
return from + kThreadLocalSize;
}
-void RegExpStack::Reset() { thread_local_.ResetToStaticStack(this); }
-
void RegExpStack::ThreadLocal::ResetToStaticStack(RegExpStack* regexp_stack) {
if (owns_memory_) DeleteArray(memory_);
memory_ = regexp_stack->static_stack_;
memory_top_ = regexp_stack->static_stack_ + kStaticStackSize;
memory_size_ = kStaticStackSize;
+ stack_pointer_ = memory_top_;
limit_ = reinterpret_cast<Address>(regexp_stack->static_stack_) +
kStackLimitSlack * kSystemPointerSize;
owns_memory_ = false;
- is_in_use_ = false;
}
void RegExpStack::ThreadLocal::FreeAndInvalidate() {
@@ -74,6 +66,7 @@ void RegExpStack::ThreadLocal::FreeAndInvalidate() {
memory_ = nullptr;
memory_top_ = nullptr;
memory_size_ = 0;
+ stack_pointer_ = nullptr;
limit_ = kMemoryTop;
}
@@ -88,9 +81,11 @@ Address RegExpStack::EnsureCapacity(size_t size) {
thread_local_.memory_, thread_local_.memory_size_);
if (thread_local_.owns_memory_) DeleteArray(thread_local_.memory_);
}
+ ptrdiff_t delta = sp_top_delta();
thread_local_.memory_ = new_memory;
thread_local_.memory_top_ = new_memory + size;
thread_local_.memory_size_ = size;
+ thread_local_.stack_pointer_ = thread_local_.memory_top_ + delta;
thread_local_.limit_ = reinterpret_cast<Address>(new_memory) +
kStackLimitSlack * kSystemPointerSize;
thread_local_.owns_memory_ = true;
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index adca683ff8..d52ca3e1d0 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -16,10 +16,7 @@ class RegExpStack;
// Maintains a per-v8thread stack area that can be used by irregexp
// implementation for its backtracking stack.
-// Since there is only one stack area, the Irregexp implementation is not
-// re-entrant. I.e., no regular expressions may be executed in the same thread
-// during a preempted Irregexp execution.
-class V8_NODISCARD RegExpStackScope {
+class V8_NODISCARD RegExpStackScope final {
public:
// Create and delete an instance to control the life-time of a growing stack.
@@ -32,46 +29,45 @@ class V8_NODISCARD RegExpStackScope {
RegExpStack* stack() const { return regexp_stack_; }
private:
- RegExpStack* regexp_stack_;
+ RegExpStack* const regexp_stack_;
+ const ptrdiff_t old_sp_top_delta_;
};
-class RegExpStack {
+class RegExpStack final {
public:
RegExpStack();
~RegExpStack();
RegExpStack(const RegExpStack&) = delete;
RegExpStack& operator=(const RegExpStack&) = delete;
- // Number of allocated locations on the stack below the limit.
- // No sequence of pushes must be longer that this without doing a stack-limit
- // check.
+ // Number of allocated locations on the stack below the limit. No sequence of
+ // pushes must be longer than this without doing a stack-limit check.
static constexpr int kStackLimitSlack = 32;
- // Gives the top of the memory used as stack.
- Address stack_base() {
+ Address memory_top() const {
DCHECK_NE(0, thread_local_.memory_size_);
DCHECK_EQ(thread_local_.memory_top_,
thread_local_.memory_ + thread_local_.memory_size_);
return reinterpret_cast<Address>(thread_local_.memory_top_);
}
- // The total size of the memory allocated for the stack.
- size_t stack_capacity() { return thread_local_.memory_size_; }
+ Address stack_pointer() const {
+ return reinterpret_cast<Address>(thread_local_.stack_pointer_);
+ }
+
+ size_t memory_size() const { return thread_local_.memory_size_; }
// If the stack pointer gets below the limit, we should react and
// either grow the stack or report an out-of-stack exception.
// There is only a limited number of locations below the stack limit,
// so users of the stack should check the stack limit during any
  // sequence of pushes longer than this.
- Address* limit_address_address() { return &(thread_local_.limit_); }
+ Address* limit_address_address() { return &thread_local_.limit_; }
// Ensures that there is a memory area with at least the specified size.
// If passing zero, the default/minimum size buffer is allocated.
Address EnsureCapacity(size_t size);
- bool is_in_use() const { return thread_local_.is_in_use_; }
- void set_is_in_use(bool v) { thread_local_.is_in_use_ = v; }
-
// Thread local archiving.
static constexpr int ArchiveSpacePerThread() {
return static_cast<int>(kThreadLocalSize);
@@ -103,44 +99,59 @@ class RegExpStack {
STATIC_ASSERT(kStaticStackSize <= kMaximumStackSize);
- // Structure holding the allocated memory, size and limit.
+ // Structure holding the allocated memory, size and limit. Thread switching
+ // archives and restores this struct.
struct ThreadLocal {
explicit ThreadLocal(RegExpStack* regexp_stack) {
ResetToStaticStack(regexp_stack);
}
- // If memory_size_ > 0 then memory_ and memory_top_ must be non-nullptr
- // and memory_top_ = memory_ + memory_size_
+ // If memory_size_ > 0 then
+ // - memory_, memory_top_, stack_pointer_ must be non-nullptr
+ // - memory_top_ = memory_ + memory_size_
+ // - memory_ <= stack_pointer_ <= memory_top_
byte* memory_ = nullptr;
byte* memory_top_ = nullptr;
size_t memory_size_ = 0;
+ byte* stack_pointer_ = nullptr;
Address limit_ = kNullAddress;
bool owns_memory_ = false; // Whether memory_ is owned and must be freed.
- bool is_in_use_ = false; // To guard against reentrancy.
void ResetToStaticStack(RegExpStack* regexp_stack);
+ void ResetToStaticStackIfEmpty(RegExpStack* regexp_stack) {
+ if (stack_pointer_ == memory_top_) ResetToStaticStack(regexp_stack);
+ }
void FreeAndInvalidate();
};
static constexpr size_t kThreadLocalSize = sizeof(ThreadLocal);
- // Address of top of memory used as stack.
Address memory_top_address_address() {
return reinterpret_cast<Address>(&thread_local_.memory_top_);
}
- // Resets the buffer if it has grown beyond the default/minimum size.
- // After this, the buffer is either the default size, or it is empty, so
- // you have to call EnsureCapacity before using it again.
- void Reset();
+ Address stack_pointer_address() {
+ return reinterpret_cast<Address>(&thread_local_.stack_pointer_);
+ }
+
+ // A position-independent representation of the stack pointer.
+ ptrdiff_t sp_top_delta() const {
+ ptrdiff_t result =
+ reinterpret_cast<intptr_t>(thread_local_.stack_pointer_) -
+ reinterpret_cast<intptr_t>(thread_local_.memory_top_);
+ DCHECK_LE(result, 0);
+ return result;
+ }
+
+ // Resets the buffer if it has grown beyond the default/minimum size and is
+ // empty.
+ void ResetIfEmpty() { thread_local_.ResetToStaticStackIfEmpty(this); }
// Whether the ThreadLocal storage has been invalidated.
bool IsValid() const { return thread_local_.memory_ != nullptr; }
ThreadLocal thread_local_;
- Isolate* isolate_;
friend class ExternalReference;
- friend class Isolate;
friend class RegExpStackScope;
};
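Aside (not part of the patch): the new stack_pointer_ / sp_top_delta() machinery above keeps the backtrack stack pointer as a non-positive offset from memory_top_, so it can be re-derived after EnsureCapacity reallocates and moves the buffer. A minimal model of just that pointer bookkeeping, assuming nothing beyond the hunks above (content copying on growth is omitted):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative model (not V8 code) of a position-independent stack pointer.
class MiniRegExpStack {
 public:
  explicit MiniRegExpStack(size_t size) : memory_(size) {
    stack_pointer_ = memory_top();  // empty stack: sp == top, delta == 0
  }

  uint8_t* memory_top() { return memory_.data() + memory_.size(); }
  ptrdiff_t sp_top_delta() { return stack_pointer_ - memory_top(); }  // <= 0

  void EnsureCapacity(size_t size) {
    if (size <= memory_.size()) return;
    ptrdiff_t delta = sp_top_delta();       // remember sp relative to the top
    memory_.resize(size);                   // buffer may move
    stack_pointer_ = memory_top() + delta;  // re-derive the absolute pointer
  }

 private:
  std::vector<uint8_t> memory_;
  uint8_t* stack_pointer_;
};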
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 742c6d9999..d739f0bc4e 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -146,7 +146,7 @@ MaybeHandle<Object> RegExp::ThrowRegExpException(Isolate* isolate,
void RegExp::ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
RegExpError error_text) {
- USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
+ USE(ThrowRegExpException(isolate, re, Handle<String>(re->source(), isolate),
error_text));
}
@@ -273,7 +273,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
// static
bool RegExp::EnsureFullyCompiled(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> subject) {
- switch (re->TypeTag()) {
+ switch (re->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM:
@@ -308,7 +308,7 @@ MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info,
ExecQuirks exec_quirks) {
- switch (regexp->TypeTag()) {
+ switch (regexp->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM:
@@ -352,7 +352,7 @@ int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
subject = String::Flatten(isolate, subject);
DisallowGarbageCollection no_gc; // ensure vectors stay valid
- String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ String needle = regexp->atom_pattern();
int needle_len = needle.length();
DCHECK(needle.IsFlat());
DCHECK_LT(0, needle_len);
@@ -420,8 +420,8 @@ Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
bool is_one_byte) {
- Object compiled_code = re->Code(is_one_byte);
- Object bytecode = re->Bytecode(is_one_byte);
+ Object compiled_code = re->code(is_one_byte);
+ Object bytecode = re->bytecode(is_one_byte);
bool needs_initial_compilation =
compiled_code == Smi::FromInt(JSRegExp::kUninitializedValue);
// Recompile is needed when we're dealing with the first execution of the
@@ -450,8 +450,8 @@ namespace {
#ifdef DEBUG
bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
- Object entry = re->Code(is_one_byte);
- Object bytecode = re->Bytecode(is_one_byte);
+ Object entry = re->code(is_one_byte);
+ Object bytecode = re->bytecode(is_one_byte);
// If we're not using the tier-up strategy, entry can only be a smi
// representing an uncompiled regexp here. If we're using the tier-up
// strategy, entry can still be a smi representing an uncompiled regexp, when
@@ -528,9 +528,9 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
DCHECK(RegExpCodeIsValidForPreCompilation(re, is_one_byte));
- RegExpFlags flags = JSRegExp::AsRegExpFlags(re->GetFlags());
+ RegExpFlags flags = JSRegExp::AsRegExpFlags(re->flags());
- Handle<String> pattern(re->Pattern(), isolate);
+ Handle<String> pattern(re->source(), isolate);
pattern = String::Flatten(isolate, pattern);
RegExpCompileData compile_data;
if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
@@ -548,7 +548,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
compile_data.compilation_target = re->ShouldProduceBytecode()
? RegExpCompilationTarget::kBytecode
: RegExpCompilationTarget::kNative;
- uint32_t backtrack_limit = re->BacktrackLimit();
+ uint32_t backtrack_limit = re->backtrack_limit();
const bool compilation_succeeded =
Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
is_one_byte, backtrack_limit);
@@ -581,7 +581,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
}
Handle<FixedArray> capture_name_map =
RegExp::CreateCaptureNameMap(isolate, compile_data.named_captures);
- re->SetCaptureNameMap(capture_name_map);
+ re->set_capture_name_map(capture_name_map);
int register_max = IrregexpMaxRegisterCount(*data);
if (compile_data.register_count > register_max) {
SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
@@ -644,7 +644,7 @@ int RegExpImpl::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
// Only reserve room for output captures. Internal registers are allocated by
// the engine.
- return JSRegExp::RegistersForCaptureCount(regexp->CaptureCount());
+ return JSRegExp::RegistersForCaptureCount(regexp->capture_count());
}
int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -654,7 +654,7 @@ int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
DCHECK_LE(index, subject->length());
DCHECK(subject->IsFlat());
DCHECK_GE(output_size,
- JSRegExp::RegistersForCaptureCount(regexp->CaptureCount()));
+ JSRegExp::RegistersForCaptureCount(regexp->capture_count()));
bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
@@ -721,14 +721,13 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
int previous_index, Handle<RegExpMatchInfo> last_match_info,
RegExp::ExecQuirks exec_quirks) {
- DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+ DCHECK_EQ(regexp->type_tag(), JSRegExp::IRREGEXP);
subject = String::Flatten(isolate, subject);
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes && regexp->ShouldProduceBytecode()) {
- String pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", pattern.ToCString().get());
+ PrintF("\n\nRegexp match: /%s/\n\n", regexp->source().ToCString().get());
PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
}
#endif
@@ -775,7 +774,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
return isolate->factory()->null_value();
}
}
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
capture_count, output_registers);
} else if (res == RegExp::RE_FALLBACK_TO_EXPERIMENTAL) {
@@ -1042,9 +1041,9 @@ RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
regexp_(regexp),
subject_(subject),
isolate_(isolate) {
- DCHECK(IsGlobal(JSRegExp::AsRegExpFlags(regexp->GetFlags())));
+ DCHECK(IsGlobal(JSRegExp::AsRegExpFlags(regexp->flags())));
- switch (regexp_->TypeTag()) {
+ switch (regexp_->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM: {
@@ -1081,7 +1080,7 @@ RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
return;
}
registers_per_match_ =
- JSRegExp::RegistersForCaptureCount(regexp->CaptureCount());
+ JSRegExp::RegistersForCaptureCount(regexp->capture_count());
register_array_size_ = std::max(
{registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize});
break;
@@ -1117,7 +1116,7 @@ RegExpGlobalCache::~RegExpGlobalCache() {
}
int RegExpGlobalCache::AdvanceZeroLength(int last_index) {
- if (IsUnicode(JSRegExp::AsRegExpFlags(regexp_->GetFlags())) &&
+ if (IsUnicode(JSRegExp::AsRegExpFlags(regexp_->flags())) &&
last_index + 1 < subject_->length() &&
unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
@@ -1142,7 +1141,7 @@ int32_t* RegExpGlobalCache::FetchNext() {
&register_array_[(current_match_index_ - 1) * registers_per_match_];
int last_end_index = last_match[1];
- switch (regexp_->TypeTag()) {
+ switch (regexp_->type_tag()) {
case JSRegExp::NOT_COMPILED:
UNREACHABLE();
case JSRegExp::ATOM:
diff --git a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
index 3269779efa..bb15bc24ed 100644
--- a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
+++ b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.cc
@@ -40,17 +40,16 @@ namespace internal {
*
* The stack will have the following structure:
*
- * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
* kStackFrameHeader
* --- sp when called ---
* - fp[72] ra Return from RegExp code (ra). kReturnAddress
* - fp[64] s9, old-fp Old fp, callee saved(s9).
* - fp[0..63] fp..s7 Callee-saved registers fp..s7.
* --- frame pointer ----
- * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
- * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
- * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
- * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-8] Isolate* isolate (address of the current isolate) kIsolate
+ * - fp[-16] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-24] output_size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-32] int* output (int[num_saved_registers_], for output). kRegisterOutput
* - fp[-40] end of input (address of end of string). kInputEnd
* - fp[-48] start of input (address of first character in string). kInputStart
* - fp[-56] start index (character index of start). kStartIndex
@@ -77,11 +76,11 @@ namespace internal {
* int start_index,
* Address start,
* Address end,
- * int* capture_output_array,
- * int num_capture_registers,
- * byte* stack_area_base,
+ * int* output,
+ * int output_size,
* bool direct_call = false,
- * Isolate* isolate);
+ * Isolate* isolate,
+ * Address regexp);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
*
@@ -96,8 +95,10 @@ RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -107,8 +108,6 @@ RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
// If the code gets too big or corrupted, an internal exception will be
@@ -120,7 +119,6 @@ RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
}
RegExpMacroAssemblerRISCV::~RegExpMacroAssemblerRISCV() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -335,7 +333,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -593,6 +591,43 @@ void RegExpMacroAssemblerRISCV::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerRISCV::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(dst, Operand(ref));
+ __ Ld(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerRISCV::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ li(scratch, Operand(ref));
+ __ Sd(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerRISCV::PushRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ LoadRegExpStackPointerFromMemory(scratch1);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(scratch2, Operand(ref));
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Sub64(scratch2, scratch1, scratch2);
+ __ Sd(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerRISCV::PopRegExpBasePointer(Register scratch1,
+ Register scratch2) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ Ld(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch2, ref);
+ __ Ld(scratch2, MemOperand(scratch2));
+ __ Add64(scratch1, scratch1, scratch2);
+ StoreRegExpStackPointerToMemory(scratch1, scratch2);
+}
+
Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
Label return_a0;
if (masm_->has_exception()) {
@@ -609,7 +644,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type is MANUAL,
    // no frame-setup code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
@@ -628,14 +663,14 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// entry as cast to a function with the signature:
//
// *int(*match)(String input_string, // a0
- // int start_index, // a1
- // Address start, // a2
- // Address end, // a3
- // int*capture_output_array, // a4
- // int num_capture_registers, // a5
- // byte* stack_area_base, // a6
- // bool direct_call = false, // a7
- // Isolate * isolate); // on the stack
+ // int start_offset, // a1
+ // byte* input_start, // a2
+ // byte* input_end, // a3
+ // int* output, // a4
+ // int output_size, // a5
+ // int call_origin, // a6
+ // Isolate* isolate, // a7
+ // Address regexp); // on the stack
RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit() |
a4.bit() | a5.bit() | a6.bit() | a7.bit();
@@ -656,6 +691,12 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
__ push(a0); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ push(a0); // The backtrack counter
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(a0); // The regexp stack base ptr.
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(a0, a1);
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -737,7 +778,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ Ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
__ jmp(&start_label_);
@@ -838,6 +879,9 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
}
__ bind(&return_a0);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(a1, a2);
   // Skip sp past regexp registers and local variables.
__ mv(sp, frame_pointer());
@@ -857,6 +901,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Preempt-code.
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
// Put regexp engine registers on stack.
RegList regexp_registers_to_retain = current_input_offset().bit() |
current_character().bit() |
@@ -867,7 +912,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// If returning non-zero, we should end execution with the given
// result as return value.
__ Branch(&return_a0, ne, a0, Operand(zero_reg));
-
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
// String might have moved: Reload end of string from frame.
__ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -877,25 +922,18 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- // Put regexp engine registers on stack first.
- RegList regexp_registers =
- current_input_offset().bit() | current_character().bit();
- __ MultiPush(regexp_registers);
-
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, a0);
- __ mv(a0, backtrack_stackpointer());
- __ Add64(a1, frame_pointer(), Operand(kStackHighEnd));
- __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ // Call GrowStack(isolate).
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(),
+ a1);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, 0, a0);
+ __ li(a0, ExternalReference::isolate_address(isolate()));
ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // Restore regexp registers.
- __ MultiPop(regexp_registers);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ ExternalReference::re_grow_stack(isolate());
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ BranchShort(&exit_with_exception, eq, a0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mv(backtrack_stackpointer(), a0);
@@ -976,7 +1014,7 @@ void RegExpMacroAssemblerRISCV::PushBacktrack(Label* label) {
int target = label->pos();
__ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
} else {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
Label after_constant;
__ BranchShort(&after_constant);
int offset = masm_->pc_offset();
@@ -1010,10 +1048,22 @@ void RegExpMacroAssemblerRISCV::ReadCurrentPositionFromRegister(int reg) {
__ Ld(current_input_offset(), register_location(reg));
}
+void RegExpMacroAssemblerRISCV::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a0, ref);
+ __ Ld(a0, MemOperand(a0));
+ __ Sub64(a0, backtrack_stackpointer(), a0);
+ __ Sw(a0, register_location(reg));
+}
+
void RegExpMacroAssemblerRISCV::ReadStackPointerFromRegister(int reg) {
- __ Ld(backtrack_stackpointer(), register_location(reg));
- __ Ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
- __ Add64(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ li(a1, ref);
+ __ Ld(a1, MemOperand(a1));
+ __ Lw(backtrack_stackpointer(), register_location(reg));
+ __ Add64(backtrack_stackpointer(), backtrack_stackpointer(), a1);
}
void RegExpMacroAssemblerRISCV::SetCurrentPositionFromEnd(int by) {
@@ -1057,11 +1107,6 @@ void RegExpMacroAssemblerRISCV::ClearRegisters(int reg_from, int reg_to) {
}
}
-void RegExpMacroAssemblerRISCV::WriteStackPointerToRegister(int reg) {
- __ Ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
- __ Sub64(a0, backtrack_stackpointer(), a1);
- __ Sd(a0, register_location(reg));
-}
bool RegExpMacroAssemblerRISCV::CanReadUnaligned() { return false; }
diff --git a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
index a5d5bb529e..211f17b314 100644
--- a/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
+++ b/deps/v8/src/regexp/riscv64/regexp-macro-assembler-riscv64.h
@@ -105,14 +105,11 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress;
- // Stack parameters placed by caller.
- static const int kIsolate = kStackFrameHeader + kSystemPointerSize;
-
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kSystemPointerSize;
- static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
@@ -124,8 +121,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+ static constexpr int kNumberOfStackLocals = 4;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -144,27 +147,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return a6; }
+ static constexpr Register current_input_offset() { return a6; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return a7; }
+ static constexpr Register current_character() { return a7; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return t2; }
+ static constexpr Register end_of_input_address() { return t2; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return t0; }
+ static constexpr Register backtrack_stackpointer() { return t0; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return a5; }
+ static constexpr Register code_pointer() { return a5; }
// Byte size of chars in the string to match (decided by the Mode argument).
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -186,19 +189,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register scratch1, Register scratch2);
+ void PopRegExpBasePointer(Register scratch1, Register scratch2);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1).
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
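Aside (not part of the patch): the slot constants introduced above chain downward from the frame pointer in kSystemPointerSize steps. The snippet below merely recomputes the offsets that the RISC-V stack-layout comment lists, assuming 8-byte system pointers; it is illustration only:

// Recomputing the frame-slot offsets declared above (illustrative, assumes
// kSystemPointerSize == 8 as on riscv64).
constexpr int kSystemPointerSize = 8;
constexpr int kFramePointer = 0;
constexpr int kIsolate = kFramePointer - kSystemPointerSize;               // fp[-8]
constexpr int kDirectCall = kIsolate - kSystemPointerSize;                 // fp[-16]
constexpr int kNumOutputRegisters = kDirectCall - kSystemPointerSize;      // fp[-24]
constexpr int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;  // fp[-32]
static_assert(kRegisterOutput == -32, "matches the stack-layout comment");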
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 83092e5336..6945aa3f6e 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -6,15 +6,14 @@
#if V8_TARGET_ARCH_S390
-#include "src/base/bits.h"
-#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
+#include "src/codegen/s390/assembler-s390-inl.h"
+#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -102,8 +101,10 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
- NewAssemblerBuffer(kRegExpCodeSize))),
+ masm_(std::make_unique<MacroAssembler>(
+ isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ no_root_array_scope_(masm_.get()),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -113,8 +114,6 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
backtrack_label_(),
exit_label_(),
internal_failure_label_() {
- masm_->set_root_array_available(false);
-
DCHECK_EQ(0, registers_to_save % 2);
__ b(&entry_label_); // We'll write the entry code later.
@@ -127,7 +126,6 @@ RegExpMacroAssemblerS390::RegExpMacroAssemblerS390(Isolate* isolate, Zone* zone,
}
RegExpMacroAssemblerS390::~RegExpMacroAssemblerS390() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -353,7 +351,7 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
__ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
{
- AllowExternalCallThatCantCauseGC scope(masm_);
+ AllowExternalCallThatCantCauseGC scope(masm_.get());
ExternalReference function =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
isolate())
@@ -629,6 +627,43 @@ void RegExpMacroAssemblerS390::Fail() {
__ b(&exit_label_);
}
+void RegExpMacroAssemblerS390::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(dst, Operand(ref));
+ __ LoadU64(dst, MemOperand(dst));
+}
+
+void RegExpMacroAssemblerS390::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ mov(scratch, Operand(ref));
+ __ StoreU64(src, MemOperand(scratch));
+}
+
+void RegExpMacroAssemblerS390::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ SubS64(scratch, stack_pointer, scratch);
+ __ StoreU64(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+}
+
+void RegExpMacroAssemblerS390::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ LoadU64(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ mov(scratch, Operand(ref));
+ __ LoadU64(scratch, MemOperand(scratch));
+ __ AddS64(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
+
Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
Label return_r2;
@@ -640,7 +675,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Tell the system that we have a stack frame. Because the type
   // is MANUAL, no frame-setup code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
+ FrameScope scope(masm_.get(), StackFrame::MANUAL);
   // Ensure register assignments are consistent with callee save mask
DCHECK(r6.bit() & kRegExpCalleeSaved);
@@ -689,33 +724,47 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
__ StoreMultipleP(r0, r9, MemOperand(sp, 0));
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ Push(r1); // The backtrack counter.
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ push(r1); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == r13);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), r3);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ mov(r2, Operand(stack_limit));
+ __ LoadU64(r2, MemOperand(r2));
+ __ SubS64(r2, sp, r2);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ ble(&stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ CmpU64(r2, Operand(num_registers_ * kSystemPointerSize));
+ __ bge(&stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(r2, Operand(EXCEPTION));
+ __ b(&return_r2);
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ mov(r2, Operand(stack_limit));
- __ LoadU64(r2, MemOperand(r2));
- __ SubS64(r2, sp, r2);
- // Handle it if the stack pointer is already below the stack limit.
- __ ble(&stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ CmpU64(r2, Operand(num_registers_ * kSystemPointerSize));
- __ bge(&stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r2, Operand(EXCEPTION));
- __ b(&return_r2);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r2);
- __ CmpS64(r2, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ bne(&return_r2);
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(r2);
+ __ CmpS64(r2, Operand::Zero());
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ bne(&return_r2);
- __ bind(&stack_ok);
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ lay(sp, MemOperand(sp, (-num_registers_ * kSystemPointerSize)));
@@ -743,18 +792,21 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ CmpS64(r3, Operand::Zero());
- __ bne(&load_char_start_regexp);
- __ mov(current_character(), Operand('\n'));
- __ b(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ CmpS64(r3, Operand::Zero());
+ __ bne(&load_char_start_regexp);
+ __ mov(current_character(), Operand('\n'));
+ __ b(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -776,10 +828,6 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ LoadU64(backtrack_stackpointer(),
- MemOperand(frame_pointer(), kStackHighEnd));
-
__ b(&start_label_);
// Exit code:
@@ -872,6 +920,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
// Prepare r2 to initialize registers with its value in the next run.
__ LoadU64(r2, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r4);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// r6: capture start index
@@ -901,6 +953,10 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
}
__ bind(&return_r2);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), r4);
+
   // Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r6..r15.
@@ -920,12 +976,16 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r3);
+
CallCheckStackGuardState(r2);
__ CmpS64(r2, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ bne(&return_r2);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload end of string from frame.
__ LoadU64(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
@@ -936,16 +996,17 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r2);
- __ mov(r2, backtrack_stackpointer());
- __ AddS64(r3, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r4, Operand(ExternalReference::isolate_address(isolate())));
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), r3);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments, r2);
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ CmpS64(r2, Operand::Zero());
__ beq(&exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1041,10 +1102,22 @@ void RegExpMacroAssemblerS390::ReadCurrentPositionFromRegister(int reg) {
__ LoadU64(current_input_offset(), register_location(reg), r0);
}
+void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r3, Operand(ref));
+ __ LoadU64(r3, MemOperand(r3));
+ __ SubS64(r2, backtrack_stackpointer(), r3);
+ __ StoreU64(r2, register_location(reg));
+}
+
void RegExpMacroAssemblerS390::ReadStackPointerFromRegister(int reg) {
- __ LoadU64(backtrack_stackpointer(), register_location(reg), r0);
- __ LoadU64(r2, MemOperand(frame_pointer(), kStackHighEnd));
- __ AddS64(backtrack_stackpointer(), r2);
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ mov(r2, Operand(ref));
+ __ LoadU64(r2, MemOperand(r2));
+ __ LoadU64(backtrack_stackpointer(), register_location(reg));
+ __ AddS64(backtrack_stackpointer(), backtrack_stackpointer(), r2);
}
void RegExpMacroAssemblerS390::SetCurrentPositionFromEnd(int by) {
@@ -1088,12 +1161,6 @@ void RegExpMacroAssemblerS390::ClearRegisters(int reg_from, int reg_to) {
}
}
-void RegExpMacroAssemblerS390::WriteStackPointerToRegister(int reg) {
- __ LoadU64(r3, MemOperand(frame_pointer(), kStackHighEnd));
- __ SubS64(r2, backtrack_stackpointer(), r3);
- __ StoreU64(r2, register_location(reg));
-}
-
// Private methods:
void RegExpMacroAssemblerS390::CallCheckStackGuardState(Register scratch) {
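Aside (not part of the patch): both ports above replace the three-argument GrowStack(sp, &stack_base, isolate) overflow call with a single-argument call whose return value is the new stack pointer (nullptr on failure), after first spilling the current backtrack stack pointer to memory. A hedged C++ model of that contract, with the grow helper passed in as a function pointer so the sketch stays self-contained; names are hypothetical, the real helper is reached via ExternalReference::re_grow_stack(isolate):

#include <cstddef>

// Hedged model (not V8 code) of the stack-overflow path emitted above.
using GrowStackFn = void* (*)(void* isolate);

void* GrowBacktrackStack(GrowStackFn grow_stack, void* isolate,
                         void* current_sp, void** stack_pointer_slot) {
  *stack_pointer_slot = current_sp;       // StoreRegExpStackPointerToMemory
  void* new_sp = grow_stack(isolate);     // single-argument call
  if (new_sp == nullptr) return nullptr;  // fail: stack-overflow exception
  return new_sp;                          // becomes the new backtrack sp
}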
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 8e8601fc7c..458eec2c8e 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
#define V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/s390/assembler-s390.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
@@ -90,21 +88,15 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
static const int kFramePointer = 0;
// Above the frame pointer - Stored registers and stack passed parameters.
- // Register 6-15(sp)
static const int kStoredRegisters = kFramePointer;
static const int kCallerFrame =
kStoredRegisters + kCalleeRegisterSaveAreaSize;
- // Stack parameters placed by caller.
- static const int kCaptureArraySize = kCallerFrame;
- static const int kStackAreaBase = kCallerFrame + kSystemPointerSize;
- // kDirectCall again
- static const int kIsolate = kStackAreaBase + 2 * kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
- static const int kDirectCall = kFramePointer - kSystemPointerSize;
- static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
- static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
+ static const int kIsolate = kFramePointer - kSystemPointerSize;
+ static const int kDirectCall = kIsolate - kSystemPointerSize;
+ static const int kNumOutputRegisters = kDirectCall - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
@@ -116,8 +108,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
+
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -137,27 +135,27 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
// Register holding the current input position as negative offset from
// the end of the string.
- inline Register current_input_offset() { return r8; }
+ static constexpr Register current_input_offset() { return r8; }
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r9; }
+ static constexpr Register current_character() { return r9; }
// Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
+ static constexpr Register end_of_input_address() { return r10; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
+ static constexpr Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return r13; }
+ static constexpr Register backtrack_stackpointer() { return r13; }
// Register holding pointer to the current code object.
- inline Register code_pointer() { return r7; }
+ static constexpr Register code_pointer() { return r7; }
// Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
+ inline int char_size() const { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is nullptr, in which case it is a conditional Backtrack.
@@ -177,19 +175,25 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
// and increments it by a word size.
inline void Pop(Register target);
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
Isolate* isolate() const { return masm_->isolate(); }
- MacroAssembler* masm_;
+ const std::unique_ptr<MacroAssembler> masm_;
+ const NoRootArrayScope no_root_array_scope_;
// Which mode to generate code for (Latin1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 6f0cb53e8f..c2185dbcc5 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -6,13 +6,13 @@
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
+#include "src/codegen/code-desc.h"
#include "src/codegen/macro-assembler.h"
#include "src/heap/factory.h"
#include "src/logging/log.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/code-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -47,14 +47,12 @@ namespace internal {
* Each call to a C++ method should retain these registers.
*
* The stack will have the following content, in some order, indexable from the
- * frame pointer (see, e.g., kStackHighEnd):
+ * frame pointer (see, e.g., kDirectCall):
* - Address regexp (address of the JSRegExp object; unused in native
* code, passed to match signature of interpreter)
* - Isolate* isolate (address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
* - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (address of end of string)
@@ -85,7 +83,6 @@ namespace internal {
* Address end,
* int* capture_output_array,
* int num_capture_registers,
- * byte* stack_area_base,
* bool direct_call = false,
* Isolate* isolate,
* Address regexp);
@@ -664,31 +661,64 @@ void RegExpMacroAssemblerX64::Fail() {
__ jmp(&exit_label_);
}
+void RegExpMacroAssemblerX64::LoadRegExpStackPointerFromMemory(Register dst) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ movq(dst, __ ExternalReferenceAsOperand(ref, dst));
+}
+
+void RegExpMacroAssemblerX64::StoreRegExpStackPointerToMemory(
+ Register src, Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_stack_pointer(isolate());
+ __ movq(__ ExternalReferenceAsOperand(ref, scratch), src);
+}
+
+void RegExpMacroAssemblerX64::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(scratch, __ ExternalReferenceAsOperand(ref, scratch));
+ __ subq(scratch, stack_pointer);
+ __ movq(Operand(rbp, kRegExpStackBasePointer), scratch);
+}
+
+void RegExpMacroAssemblerX64::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
+ ExternalReference ref =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(scratch, Operand(rbp, kRegExpStackBasePointer));
+ __ movq(stack_pointer_out,
+ __ ExternalReferenceAsOperand(ref, stack_pointer_out));
+ __ subq(stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
+}
Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label return_rax;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
- // Entry code:
+ // Finalize code - write the entry point code now we know how many registers
+ // we need.
__ bind(&entry_label_);
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // is generated.
+ // Tell the system that we have a stack frame. Because the type is MANUAL, no
+ // physical frame is generated.
FrameScope scope(&masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
__ pushq(rbp);
__ movq(rbp, rsp);
+
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
#ifdef V8_TARGET_OS_WIN
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
- // Store register parameters in pre-allocated stack slots,
- __ movq(Operand(rbp, kInputString), rcx);
- __ movq(Operand(rbp, kStartIndex), rdx); // Passed as int32 in edx.
- __ movq(Operand(rbp, kInputStart), r8);
- __ movq(Operand(rbp, kInputEnd), r9);
- // Callee-save on Win64.
+ // Store register parameters in pre-allocated stack slots.
+ __ movq(Operand(rbp, kInputString), arg_reg_1);
+ __ movq(Operand(rbp, kStartIndex), arg_reg_2); // Passed as int32 in edx.
+ __ movq(Operand(rbp, kInputStart), arg_reg_3);
+ __ movq(Operand(rbp, kInputEnd), arg_reg_4);
+
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 3);
__ pushq(rsi);
__ pushq(rdi);
__ pushq(rbx);
@@ -701,14 +731,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
DCHECK_EQ(kInputEnd, -4 * kSystemPointerSize);
DCHECK_EQ(kRegisterOutput, -5 * kSystemPointerSize);
DCHECK_EQ(kNumOutputRegisters, -6 * kSystemPointerSize);
- __ pushq(rdi);
- __ pushq(rsi);
- __ pushq(rdx);
- __ pushq(rcx);
+ __ pushq(arg_reg_1);
+ __ pushq(arg_reg_2);
+ __ pushq(arg_reg_3);
+ __ pushq(arg_reg_4);
__ pushq(r8);
__ pushq(r9);
- __ pushq(rbx); // Callee-save
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 1);
+ __ pushq(rbx);
#endif
STATIC_ASSERT(kSuccessfulCaptures ==
@@ -719,35 +750,50 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ Push(Immediate(0)); // Make room for "string start - 1" constant.
STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
__ Push(Immediate(0)); // The backtrack counter.
+ STATIC_ASSERT(kRegExpStackBasePointer ==
+ kBacktrackCount - kSystemPointerSize);
+ __ Push(Immediate(0)); // The regexp stack base ptr.
+
+ // Initialize backtrack stack pointer. It must not be clobbered from here on.
+ // Note the backtrack_stackpointer is *not* callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == rcx);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
+ // Store the regexp base pointer - we'll later restore it / write it to
+ // memory when returning from this irregexp code object.
+ PushRegExpBasePointer(backtrack_stackpointer(), kScratchRegister);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(isolate());
+ __ movq(r9, rsp);
+ __ Move(kScratchRegister, stack_limit);
+ __ subq(r9, Operand(kScratchRegister, 0));
+ // Handle it if the stack pointer is already below the stack limit.
+ __ j(below_equal, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmpq(r9, Immediate(num_registers_ * kSystemPointerSize));
+ __ j(above_equal, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ Move(rax, EXCEPTION);
+ __ jmp(&return_rax);
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(isolate());
- __ movq(rcx, rsp);
- __ Move(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kSystemPointerSize));
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Move(rax, EXCEPTION);
- __ jmp(&return_rax);
-
- __ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_.CodeObject());
- CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_rax);
+ __ bind(&stack_limit_hit);
+ __ Move(code_object_pointer(), masm_.CodeObject());
+ __ pushq(backtrack_stackpointer());
+ CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
+ __ popq(backtrack_stackpointer());
+ __ testq(rax, rax);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ j(not_zero, &return_rax);
- __ bind(&stack_ok);
+ __ bind(&stack_ok);
+ }
// Allocate space on stack for registers.
__ AllocateStackSpace(num_registers_ * kSystemPointerSize);
@@ -773,18 +819,23 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ Move(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
+ Label load_char_start_regexp; // Execution restarts here for global regexps.
+ {
+ Label start_regexp;
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ // Load newline if index is at start, previous character otherwise.
+ __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+ __ j(not_equal, &load_char_start_regexp, Label::kNear);
+ __ Move(current_character(), '\n');
+ __ jmp(&start_regexp, Label::kNear);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) {
@@ -792,13 +843,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
- __ Move(rcx, kRegisterZero);
+ __ Move(r9, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kSystemPointerSize));
- __ cmpq(rcx, Immediate(kRegisterZero -
- num_saved_registers_ * kSystemPointerSize));
+ __ movq(Operand(rbp, r9, times_1, 0), rax);
+ __ subq(r9, Immediate(kSystemPointerSize));
+ __ cmpq(r9, Immediate(kRegisterZero -
+ num_saved_registers_ * kSystemPointerSize));
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
@@ -807,9 +858,6 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-
__ jmp(&start_label_);
// Exit code:
@@ -861,6 +909,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Prepare rax to initialize registers with its value in the next run.
__ movq(rax, Operand(rbp, kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), kScratchRegister);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
@@ -894,19 +946,26 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
__ bind(&return_rax);
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), kScratchRegister);
+
#ifdef V8_TARGET_OS_WIN
// Restore callee save registers.
__ leaq(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 3);
__ popq(rbx);
__ popq(rdi);
__ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
+ STATIC_ASSERT(kNumCalleeSaveRegisters == 1);
__ movq(rbx, Operand(rbp, kBackup_rbx));
// Skip rsp to rbp.
__ movq(rsp, rbp);
#endif
+
// Exit function frame, restore previous one.
__ popq(rbp);
__ ret(0);
@@ -923,9 +982,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ pushq(backtrack_stackpointer());
__ pushq(rdi);
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), kScratchRegister);
+
CallCheckStackGuardState();
__ testq(rax, rax);
// If returning non-zero, we should end execution with the given
@@ -935,7 +995,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
__ popq(rdi);
- __ popq(backtrack_stackpointer());
+
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// String might have moved: Reload esi from frame.
__ movq(rsi, Operand(rbp, kInputEnd));
SafeReturn();
@@ -953,25 +1015,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pushq(rdi);
#endif
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef V8_TARGET_OS_WIN
- // Microsoft passes parameters in rcx, rdx, r8.
- // First argument, backtrack stackpointer, is already in rcx.
- __ leaq(rdx, Operand(rbp, kStackHighEnd)); // Second argument
- __ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
-#else
- // AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
- __ leaq(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
- __ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
-#endif
+ // Call GrowStack(isolate).
+
+ StoreRegExpStackPointerToMemory(backtrack_stackpointer(), kScratchRegister);
+
+ static constexpr int kNumArguments = 1;
+ __ PrepareCallCFunction(kNumArguments);
+ __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
+
ExternalReference grow_stack =
ExternalReference::re_grow_stack(isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If return nullptr, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
+ __ CallCFunction(grow_stack, kNumArguments);
+ // If nullptr is returned, we have failed to grow the stack, and must exit
+ // with a stack-overflow exception.
__ testq(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
@@ -1085,13 +1141,25 @@ void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
__ movq(dst, register_location(reg));
}
+// Preserves a position-independent representation of the stack pointer in reg:
+// reg = top - sp.
+void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(rax, __ ExternalReferenceAsOperand(stack_top_address, rax));
+ __ subq(rax, backtrack_stackpointer());
+ __ movq(register_location(reg), rax);
+}
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ ExternalReference stack_top_address =
+ ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
+ __ movq(backtrack_stackpointer(),
+ __ ExternalReferenceAsOperand(stack_top_address,
+ backtrack_stackpointer()));
+ __ subq(backtrack_stackpointer(), register_location(reg));
}
-
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ cmpq(rdi, Immediate(-by * char_size()));
@@ -1136,14 +1204,6 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
}
}
-
-void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
-}
-
-
// Private methods:
void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index c3a3cb90f2..69bb399c3e 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -5,9 +5,7 @@
#ifndef V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#define V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#include "src/base/strings.h"
#include "src/codegen/macro-assembler.h"
-#include "src/codegen/x64/assembler-x64.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/zone/zone-chunk-list.h"
@@ -110,9 +108,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
// this value. NumOutputRegisters is passed as 32-bit value. The upper
// 32 bit of this 64-bit stack slot may contain garbage.
static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+ static const int kDirectCall = kNumOutputRegisters + kSystemPointerSize;
static const int kIsolate = kDirectCall + kSystemPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
@@ -123,28 +120,26 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
static const int kInputStart = kStartIndex - kSystemPointerSize;
static const int kInputEnd = kInputStart - kSystemPointerSize;
static const int kRegisterOutput = kInputEnd - kSystemPointerSize;
-
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput - kSystemPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
+
+ static const int kDirectCall = kFrameAlign;
static const int kIsolate = kDirectCall + kSystemPointerSize;
#endif
+ // We push callee-save registers that we use after the frame pointer (and
+ // after the parameters).
#ifdef V8_TARGET_OS_WIN
- // Microsoft calling convention has three callee-saved registers
- // (that we are using). We push these after the frame pointer.
static const int kBackup_rsi = kFramePointer - kSystemPointerSize;
static const int kBackup_rdi = kBackup_rsi - kSystemPointerSize;
static const int kBackup_rbx = kBackup_rdi - kSystemPointerSize;
+ static const int kNumCalleeSaveRegisters = 3;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#else
- // AMD64 Calling Convention has only one callee-save register that
- // we use. We push this after the frame pointer (and after the
- // parameters).
static const int kBackup_rbx = kNumOutputRegisters - kSystemPointerSize;
+ static const int kNumCalleeSaveRegisters = 1;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
@@ -155,9 +150,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // Stores the initial value of the regexp stack pointer in a
+ // position-independent representation (in case the regexp stack grows and
+ // thus moves).
+ static const int kRegExpStackBasePointer =
+ kBacktrackCount - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+ static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
// Initial size of code buffer.
static const int kRegExpCodeSize = 1024;
@@ -175,14 +175,14 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
Operand register_location(int register_index);
// The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return rdx; }
+ static constexpr Register current_character() { return rdx; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- inline Register backtrack_stackpointer() { return rcx; }
+ static constexpr Register backtrack_stackpointer() { return rcx; }
// The registers containing a self pointer to this code's Code object.
- inline Register code_object_pointer() { return r8; }
+ static constexpr Register code_object_pointer() { return r8; }
// Byte size of chars in the string to match (decided by the Mode argument)
inline int char_size() { return static_cast<int>(mode_); }
@@ -224,24 +224,36 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
+ void LoadRegExpStackPointerFromMemory(Register dst);
+ void StoreRegExpStackPointerToMemory(Register src, Register scratch);
+  void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+  void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
+
inline void ReadPositionFromRegister(Register dst, int reg);
Isolate* isolate() const { return masm_.isolate(); }
MacroAssembler masm_;
- NoRootArrayScope no_root_array_scope_;
+
+ // On x64, there is no reason to keep the kRootRegister uninitialized; we
+ // could easily use it by 1. initializing it and 2. storing/restoring it
+ // as callee-save on entry/exit.
+ // But: on other platforms, specifically ia32, it would be tricky to enable
+ // the kRootRegister since it's currently used for other purposes. Thus, for
+ // consistency, we also keep it uninitialized here.
+ const NoRootArrayScope no_root_array_scope_;
ZoneChunkList<int> code_relative_fixup_positions_;
// Which mode to generate code for (LATIN1 or UC16).
- Mode mode_;
+ const Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
- int num_saved_registers_;
+ const int num_saved_registers_;
// Labels used internally.
Label entry_label_;
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 1cf4f9f644..b584a7de99 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -298,7 +298,8 @@ bool AddDescriptorsByTemplate(
int count = 0;
for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
PropertyDetails details = descriptors_template->GetDetails(i);
- if (details.location() == kDescriptor && details.kind() == kData) {
+ if (details.location() == PropertyLocation::kDescriptor &&
+ details.kind() == kData) {
count++;
}
}
@@ -319,7 +320,7 @@ bool AddDescriptorsByTemplate(
Name name = descriptors_template->GetKey(i);
DCHECK(name.IsUniqueName());
PropertyDetails details = descriptors_template->GetDetails(i);
- if (details.location() == kDescriptor) {
+ if (details.location() == PropertyLocation::kDescriptor) {
if (details.kind() == kData) {
if (value.IsSmi()) {
value = GetMethodWithSharedName(isolate, args, value);
@@ -344,11 +345,13 @@ bool AddDescriptorsByTemplate(
UNREACHABLE();
}
DCHECK(value.FitsRepresentation(details.representation()));
- if (details.location() == kDescriptor && details.kind() == kData) {
- details = PropertyDetails(details.kind(), details.attributes(), kField,
- PropertyConstness::kConst,
- details.representation(), field_index)
- .set_pointer(details.pointer());
+ if (details.location() == PropertyLocation::kDescriptor &&
+ details.kind() == kData) {
+ details =
+ PropertyDetails(details.kind(), details.attributes(),
+ PropertyLocation::kField, PropertyConstness::kConst,
+ details.representation(), field_index)
+ .set_pointer(details.pointer());
property_array->set(field_index, value);
field_index++;
@@ -626,7 +629,12 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
Handle<JSObject> prototype = CreateClassPrototype(isolate);
DCHECK_EQ(*constructor, args[ClassBoilerplate::kConstructorArgumentIndex]);
- args.set_at(ClassBoilerplate::kPrototypeArgumentIndex, *prototype);
+ // Temporarily change ClassBoilerplate::kPrototypeArgumentIndex for the
+ // subsequent calls, but use a scope to make sure to change it back before
+ // returning, to not corrupt the caller's argument frame (in particular, for
+ // the interpreter, to not clobber the register frame).
+ RuntimeArguments::ChangeValueScope set_prototype_value_scope(
+ isolate, &args, ClassBoilerplate::kPrototypeArgumentIndex, *prototype);
if (!InitClassConstructor(isolate, class_boilerplate, constructor_parent,
constructor, args) ||
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index cb92eae13c..588dce9222 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -335,16 +335,15 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
"[[ArrayBufferByteLength]]"),
isolate->factory()->NewNumberFromSize(byte_length));
- // Use the backing store pointer as a unique ID
- base::EmbeddedVector<char, 32> buffer_data_vec;
- int len =
- SNPrintF(buffer_data_vec, V8PRIxPTR_FMT,
- reinterpret_cast<Address>(js_array_buffer->backing_store()));
+ auto backing_store = js_array_buffer->GetBackingStore();
+ Handle<Object> array_buffer_data =
+ backing_store
+ ? isolate->factory()->NewNumberFromUint(backing_store->id())
+ : isolate->factory()->null_value();
result = ArrayList::Add(
isolate, result,
isolate->factory()->NewStringFromAsciiChecked("[[ArrayBufferData]]"),
- isolate->factory()->InternalizeUtf8String(
- buffer_data_vec.SubVector(0, len)));
+ array_buffer_data);
Handle<Symbol> memory_symbol =
isolate->factory()->array_buffer_wasm_memory_symbol();
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 31e50fa3e8..958bc2277f 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -110,7 +110,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
copy->map(isolate).instance_descriptors(isolate), isolate);
for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
- DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(kData, details.kind());
FieldIndex index = FieldIndex::ForPropertyIndex(
copy->map(isolate), details.field_index(),
@@ -678,7 +678,8 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
Handle<String> source(String::cast(regexp_instance->source()), isolate);
Handle<RegExpBoilerplateDescription> boilerplate =
isolate->factory()->NewRegExpBoilerplateDescription(
- data, source, Smi::cast(regexp_instance->flags()));
+ data, source,
+ Smi::FromInt(static_cast<int>(regexp_instance->flags())));
vector->SynchronizedSet(literal_slot, *boilerplate);
DCHECK(HasBoilerplate(
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index bec54bd8d4..3da21358d8 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -186,7 +186,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Zap the property to avoid keeping objects alive. Zapping is not necessary
// for properties stored in the descriptor array.
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DisallowGarbageCollection no_gc;
// Invalidate slots manually later in case we delete an in-object tagged
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index c52449a642..eb16e9c24f 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -333,8 +333,8 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
FixedArray capture_name_map;
if (capture_count > 0) {
- DCHECK(JSRegExp::TypeSupportsCaptures(regexp->TypeTag()));
- Object maybe_capture_name_map = regexp->CaptureNameMap();
+ DCHECK(JSRegExp::TypeSupportsCaptures(regexp->type_tag()));
+ Object maybe_capture_name_map = regexp->capture_name_map();
if (maybe_capture_name_map.IsFixedArray()) {
capture_name_map = FixedArray::cast(maybe_capture_name_map);
}
@@ -550,9 +550,8 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
std::vector<int>* indices = GetRewoundRegexpIndicesList(isolate);
- DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
- String pattern =
- String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->type_tag());
+ String pattern = pattern_regexp->atom_pattern();
int subject_len = subject->length();
int pattern_len = pattern.length();
int replacement_len = replacement->length();
@@ -595,7 +594,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
// Copy non-matched subject content.
if (subject_pos < index) {
String::WriteToFlat(*subject, result->GetChars(no_gc) + result_pos,
- subject_pos, index);
+ subject_pos, index - subject_pos);
result_pos += index - subject_pos;
}
@@ -611,7 +610,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
// Add remaining subject content at the end.
if (subject_pos < subject_len) {
String::WriteToFlat(*subject, result->GetChars(no_gc) + result_pos,
- subject_pos, subject_len);
+ subject_pos, subject_len - subject_pos);
}
int32_t match_indices[] = {indices->back(), indices->back() + pattern_len};
@@ -628,7 +627,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
DCHECK(subject->IsFlat());
DCHECK(replacement->IsFlat());
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int subject_length = subject->length();
// Ensure the RegExp is compiled so we can access the capture-name map.
@@ -641,7 +640,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
isolate, regexp, replacement, capture_count, subject_length);
// Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
+ if (regexp->type_tag() == JSRegExp::ATOM && simple_replace) {
if (subject->IsOneByteRepresentation() &&
replacement->IsOneByteRepresentation()) {
return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
@@ -706,7 +705,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
DCHECK(subject->IsFlat());
// Shortcut for simple non-regexp global replacements
- if (regexp->TypeTag() == JSRegExp::ATOM) {
+ if (regexp->type_tag() == JSRegExp::ATOM) {
Handle<String> empty_string = isolate->factory()->empty_string();
if (subject->IsOneByteRepresentation()) {
return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
@@ -728,7 +727,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
int start = current_match[0];
int end = current_match[1];
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int subject_length = subject->length();
int new_length = subject_length - (end - start);
@@ -753,7 +752,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
if (prev < start) {
// Add substring subject[prev;start] to answer string.
String::WriteToFlat(*subject, answer->GetChars(no_gc) + position, prev,
- start);
+ start - prev);
position += start - prev;
}
prev = end;
@@ -769,7 +768,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
if (prev < subject_length) {
// Add substring subject[prev;length] to answer string.
String::WriteToFlat(*subject, answer->GetChars(no_gc) + position, prev,
- subject_length);
+ subject_length - prev);
position += subject_length - prev;
}
@@ -967,7 +966,7 @@ RUNTIME_FUNCTION(Runtime_RegExpBuildIndices) {
CONVERT_ARG_HANDLE_CHECKED(Object, maybe_names, 2);
#ifdef DEBUG
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- DCHECK(regexp->GetFlags() & JSRegExp::kHasIndices);
+ DCHECK(regexp->flags() & JSRegExp::kHasIndices);
#endif
return *JSRegExpResultIndices::BuildIndices(isolate, match_info, maybe_names);
@@ -983,8 +982,8 @@ class MatchInfoBackedMatch : public String::Match {
: isolate_(isolate), match_info_(match_info) {
subject_ = String::Flatten(isolate, subject);
- if (JSRegExp::TypeSupportsCaptures(regexp->TypeTag())) {
- Object o = regexp->CaptureNameMap();
+ if (JSRegExp::TypeSupportsCaptures(regexp->type_tag())) {
+ Object o = regexp->capture_name_map();
has_named_captures_ = o.IsFixedArray();
if (has_named_captures_) {
capture_name_map_ = handle(FixedArray::cast(o), isolate);
@@ -1165,7 +1164,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Handle<RegExpMatchInfo> last_match_array,
Handle<JSArray> result_array) {
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
- DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
+ DCHECK_NE(has_capture, regexp->capture_count() == 0);
DCHECK(subject->IsFlat());
// Force tier up to native code for global replaces. The global replace is
@@ -1173,7 +1172,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
// native code expects an array to store all the matches, and the bytecode
// matches one at a time, so it's easier to tier-up to native code from the
// start.
- if (FLAG_regexp_tier_up && regexp->TypeTag() == JSRegExp::IRREGEXP) {
+ if (FLAG_regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
regexp->MarkTierUpForNextExec();
if (FLAG_trace_regexp_tier_up) {
PrintF("Forcing tier-up of JSRegExp object %p in SearchRegExpMultiple\n",
@@ -1181,7 +1180,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
}
- int capture_count = regexp->CaptureCount();
+ int capture_count = regexp->capture_count();
int subject_length = subject->length();
static const int kMinLengthToCache = 0x1000;
@@ -1260,7 +1259,7 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
// subject, i.e., 3 + capture count in total. If the RegExp contains
// named captures, they are also passed as the last argument.
- Handle<Object> maybe_capture_map(regexp->CaptureNameMap(), isolate);
+ Handle<Object> maybe_capture_map(regexp->capture_name_map(), isolate);
const bool has_named_captures = maybe_capture_map->IsFixedArray();
const int argc =
@@ -1350,7 +1349,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
Factory* factory = isolate->factory();
- const int flags = regexp->GetFlags();
+ const int flags = regexp->flags();
const bool global = (flags & JSRegExp::kGlobal) != 0;
const bool sticky = (flags & JSRegExp::kSticky) != 0;
@@ -1422,7 +1421,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
// native code expects an array to store all the matches, and the bytecode
// matches one at a time, so it's easier to tier-up to native code from the
// start.
- if (FLAG_regexp_tier_up && regexp->TypeTag() == JSRegExp::IRREGEXP) {
+ if (FLAG_regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
regexp->MarkTierUpForNextExec();
if (FLAG_trace_regexp_tier_up) {
PrintF("Forcing tier-up of JSRegExp object %p in RegExpReplace\n",
@@ -1472,10 +1471,10 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
CHECK(result_array->HasObjectElements());
subject = String::Flatten(isolate, subject);
- CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
+ CHECK(regexp->flags() & JSRegExp::kGlobal);
Object result;
- if (regexp->CaptureCount() == 0) {
+ if (regexp->capture_count() == 0) {
result = SearchRegExpMultiple<false>(isolate, subject, regexp,
last_match_info, result_array);
} else {
@@ -1499,7 +1498,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
Factory* factory = isolate->factory();
Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
- const int flags = regexp->GetFlags();
+ const int flags = regexp->flags();
DCHECK_EQ(flags & JSRegExp::kGlobal, 0);
// TODO(jgruber): This should be an easy port to CSA with massive payback.
@@ -1552,9 +1551,9 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
bool has_named_captures = false;
Handle<FixedArray> capture_map;
if (m > 1) {
- DCHECK(JSRegExp::TypeSupportsCaptures(regexp->TypeTag()));
+ DCHECK(JSRegExp::TypeSupportsCaptures(regexp->type_tag()));
- Object maybe_capture_map = regexp->CaptureNameMap();
+ Object maybe_capture_map = regexp->capture_name_map();
if (maybe_capture_map.IsFixedArray()) {
has_named_captures = true;
capture_map = handle(FixedArray::cast(maybe_capture_map), isolate);
@@ -2015,7 +2014,7 @@ RUNTIME_FUNCTION(Runtime_RegExpStringFromFlags) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- Handle<String> flags = JSRegExp::StringFromFlags(isolate, regexp.GetFlags());
+ Handle<String> flags = JSRegExp::StringFromFlags(isolate, regexp.flags());
return *flags;
}
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 3b49e8a891..38cdf57671 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -983,11 +983,11 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
UNREACHABLE();
}
-RUNTIME_FUNCTION(Runtime_AbortCSAAssert) {
+RUNTIME_FUNCTION(Runtime_AbortCSADcheck) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
- base::OS::PrintError("abort: CSA_ASSERT failed: %s\n",
+ base::OS::PrintError("abort: CSA_DCHECK failed: %s\n",
message->ToCString().get());
isolate->PrintStack(stderr);
base::OS::Abort();
@@ -1135,8 +1135,8 @@ RUNTIME_FUNCTION(Runtime_RegexpHasBytecode) {
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
bool result;
- if (regexp.TypeTag() == JSRegExp::IRREGEXP) {
- result = regexp.Bytecode(is_latin1).IsByteArray();
+ if (regexp.type_tag() == JSRegExp::IRREGEXP) {
+ result = regexp.bytecode(is_latin1).IsByteArray();
} else {
result = false;
}
@@ -1149,8 +1149,8 @@ RUNTIME_FUNCTION(Runtime_RegexpHasNativeCode) {
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_latin1, 1);
bool result;
- if (regexp.TypeTag() == JSRegExp::IRREGEXP) {
- result = regexp.Code(is_latin1).IsCodeT();
+ if (regexp.type_tag() == JSRegExp::IRREGEXP) {
+ result = regexp.code(is_latin1).IsCodeT();
} else {
result = false;
}
@@ -1162,7 +1162,7 @@ RUNTIME_FUNCTION(Runtime_RegexpTypeTag) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
const char* type_str;
- switch (regexp.TypeTag()) {
+ switch (regexp.type_tag()) {
case JSRegExp::NOT_COMPILED:
type_str = "NOT_COMPILED";
break;
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index df4ea14164..9083a0dcdd 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -245,7 +245,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
const wasm::WasmModule* module = instance->module();
const int function_index = function_data->function_index();
- const wasm::WasmFunction function = module->functions[function_index];
+ const wasm::WasmFunction& function = module->functions[function_index];
const wasm::FunctionSig* sig = function.sig;
// The start function is not guaranteed to be registered as
@@ -277,8 +277,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
continue;
}
int index = static_cast<int>(exp.index);
- wasm::WasmFunction function = module->functions[index];
- if (function.sig == sig && index != function_index) {
+ const wasm::WasmFunction& exp_function = module->functions[index];
+ if (exp_function.sig == sig && index != function_index) {
ReplaceWrapper(isolate, instance, index, wrapper_code);
}
}
@@ -572,9 +572,8 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
i::WeakArrayList weak_instance_list = script->wasm_weak_instance_list();
for (int i = 0; i < weak_instance_list.length(); ++i) {
if (weak_instance_list.Get(i)->IsCleared()) continue;
- i::WasmInstanceObject instance = i::WasmInstanceObject::cast(
- weak_instance_list.Get(i)->GetHeapObject());
- instance.set_break_on_entry(false);
+ i::WasmInstanceObject::cast(weak_instance_list.Get(i)->GetHeapObject())
+ .set_break_on_entry(false);
}
DCHECK(!instance->break_on_entry());
Handle<FixedArray> on_entry_breakpoints;
@@ -641,7 +640,7 @@ inline void* ArrayElementAddress(Handle<WasmArray> array, uint32_t index,
}
} // namespace
-// Assumes copy ranges are in-bounds.
+// Assumes copy ranges are in-bounds and copy length > 0.
RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
@@ -651,6 +650,7 @@ RUNTIME_FUNCTION(Runtime_WasmArrayCopy) {
CONVERT_ARG_HANDLE_CHECKED(WasmArray, src_array, 2);
CONVERT_UINT32_ARG_CHECKED(src_index, 3);
CONVERT_UINT32_ARG_CHECKED(length, 4);
+ DCHECK_GT(length, 0);
bool overlapping_ranges =
dst_array->ptr() == src_array->ptr() &&
(dst_index < src_index ? dst_index + length > src_index
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index fed9c01416..2e6fc6fa6e 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -463,7 +463,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_TEST(F, I) \
F(Abort, 1, 1) \
- F(AbortCSAAssert, 1, 1) \
+ F(AbortCSADcheck, 1, 1) \
F(AbortJS, 1, 1) \
F(ArrayIteratorProtector, 0, 1) \
F(ArraySpeciesProtector, 0, 1) \
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 9f32faf67a..597da11081 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -510,7 +510,7 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
new_off_heap_array_buffers_.push_back(buffer);
} else {
- buffer->set_backing_store(main_thread_isolate(), nullptr);
+ buffer->set_backing_store(nullptr);
}
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 47221dd952..aa595a67a2 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h" // For Space::identity().
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/read-only-heap.h"
@@ -48,6 +49,10 @@ Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
#endif // OBJECT_PRINT
}
+#ifdef DEBUG
+void Serializer::PopStack() { stack_.Pop(); }
+#endif
+
void Serializer::CountAllocation(Map map, int size, SnapshotSpace space) {
DCHECK(FLAG_serialization_statistics);
@@ -536,7 +541,7 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
SerializeObject();
- buffer->set_backing_store(isolate(), backing_store);
+ buffer->set_backing_store(backing_store);
buffer->set_extension(extension);
}
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 82b1d8ed1e..3695e21719 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -280,7 +280,7 @@ class Serializer : public SerializerDeserializer {
#ifdef DEBUG
void PushStack(Handle<HeapObject> o) { stack_.Push(*o); }
- void PopStack() { stack_.Pop(); }
+ void PopStack();
void PrintStack();
void PrintStack(std::ostream&);
#endif // DEBUG
diff --git a/deps/v8/src/strings/string-builder.cc b/deps/v8/src/strings/string-builder.cc
index 71534d635f..7135d556bc 100644
--- a/deps/v8/src/strings/string-builder.cc
+++ b/deps/v8/src/strings/string-builder.cc
@@ -34,7 +34,7 @@ void StringBuilderConcatHelper(String special, sinkchar* sink,
pos = Smi::ToInt(obj);
len = -encoded_slice;
}
- String::WriteToFlat(special, sink + position, pos, pos + len);
+ String::WriteToFlat(special, sink + position, pos, len);
position += len;
} else {
String string = String::cast(element);
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 66140a1455..39494a7827 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -302,7 +302,7 @@ void StringStream::PrintUsingMap(JSObject js_object) {
DescriptorArray descs = map.instance_descriptors(js_object.GetIsolate());
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
DCHECK_EQ(kData, details.kind());
Object key = descs.GetKey(i);
if (key.IsString() || key.IsNumber()) {
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 2e1aed3ec9..d5c99d4890 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -625,18 +625,18 @@ struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
BasicTypeExpression(SourcePosition pos,
std::vector<std::string> namespace_qualification,
- std::string name,
+ Identifier* name,
std::vector<TypeExpression*> generic_arguments)
: TypeExpression(kKind, pos),
namespace_qualification(std::move(namespace_qualification)),
- is_constexpr(IsConstexprName(name)),
- name(std::move(name)),
+ is_constexpr(IsConstexprName(name->value)),
+ name(name),
generic_arguments(std::move(generic_arguments)) {}
- BasicTypeExpression(SourcePosition pos, std::string name)
- : BasicTypeExpression(pos, {}, std::move(name), {}) {}
+ BasicTypeExpression(SourcePosition pos, Identifier* name)
+ : BasicTypeExpression(pos, {}, name, {}) {}
std::vector<std::string> namespace_qualification;
bool is_constexpr;
- std::string name;
+ Identifier* name;
std::vector<TypeExpression*> generic_arguments;
};
@@ -721,7 +721,7 @@ struct DebugStatement : Statement {
struct AssertStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AssertStatement)
- enum class AssertKind { kAssert, kCheck, kStaticAssert };
+ enum class AssertKind { kDcheck, kCheck, kStaticAssert };
AssertStatement(SourcePosition pos, AssertKind kind, Expression* expression,
std::string source)
: Statement(kKind, pos),
@@ -939,7 +939,6 @@ struct ClassFieldExpression {
std::vector<ConditionalAnnotation> conditions;
bool weak;
bool const_qualified;
- bool generate_verify;
FieldSynchronization read_synchronization;
FieldSynchronization write_synchronization;
};
@@ -1306,10 +1305,9 @@ inline VarDeclarationStatement* MakeConstDeclarationStatement(
}
inline BasicTypeExpression* MakeBasicTypeExpression(
- std::vector<std::string> namespace_qualification, std::string name,
+ std::vector<std::string> namespace_qualification, Identifier* name,
std::vector<TypeExpression*> generic_arguments = {}) {
- return MakeNode<BasicTypeExpression>(std::move(namespace_qualification),
- std::move(name),
+ return MakeNode<BasicTypeExpression>(std::move(namespace_qualification), name,
std::move(generic_arguments));
}
diff --git a/deps/v8/src/torque/cc-generator.cc b/deps/v8/src/torque/cc-generator.cc
index 0dea634ba4..a1f4d496cf 100644
--- a/deps/v8/src/torque/cc-generator.cc
+++ b/deps/v8/src/torque/cc-generator.cc
@@ -105,7 +105,6 @@ std::vector<std::string> CCGenerator::ProcessArgumentsCommon(
std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
- VisitResult arg;
if (type->IsConstexpr()) {
args.push_back(std::move(constexpr_arguments.back()));
constexpr_arguments.pop_back();
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 57ff3ef4e1..6490a30d38 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -83,8 +83,6 @@ static const char* const FIXED_ARRAY_BASE_TYPE_STRING = "FixedArrayBase";
static const char* const WEAK_HEAP_OBJECT = "WeakHeapObject";
static const char* const STATIC_ASSERT_MACRO_STRING = "StaticAssert";
-static const char* const ANNOTATION_GENERATE_PRINT = "@generatePrint";
-static const char* const ANNOTATION_NO_VERIFIER = "@noVerifier";
static const char* const ANNOTATION_ABSTRACT = "@abstract";
static const char* const ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT =
"@hasSameInstanceTypeAsParent";
@@ -144,21 +142,19 @@ using AbstractTypeFlags = base::Flags<AbstractTypeFlag>;
enum class ClassFlag {
kNone = 0,
kExtern = 1 << 0,
- kGeneratePrint = 1 << 1,
- kGenerateVerify = 1 << 2,
- kTransient = 1 << 3,
- kAbstract = 1 << 4,
- kIsShape = 1 << 5,
- kHasSameInstanceTypeAsParent = 1 << 6,
- kGenerateCppClassDefinitions = 1 << 7,
- kCustomCppClass = 1 << 8,
- kHighestInstanceTypeWithinParent = 1 << 9,
- kLowestInstanceTypeWithinParent = 1 << 10,
- kUndefinedLayout = 1 << 11,
- kGenerateBodyDescriptor = 1 << 12,
- kExport = 1 << 13,
- kDoNotGenerateCast = 1 << 14,
- kCustomMap = 1 << 15,
+ kTransient = 1 << 1,
+ kAbstract = 1 << 2,
+ kIsShape = 1 << 3,
+ kHasSameInstanceTypeAsParent = 1 << 4,
+ kGenerateCppClassDefinitions = 1 << 5,
+ kCustomCppClass = 1 << 6,
+ kHighestInstanceTypeWithinParent = 1 << 7,
+ kLowestInstanceTypeWithinParent = 1 << 8,
+ kUndefinedLayout = 1 << 9,
+ kGenerateBodyDescriptor = 1 << 10,
+ kExport = 1 << 11,
+ kDoNotGenerateCast = 1 << 12,
+ kCustomMap = 1 << 13,
};
using ClassFlags = base::Flags<ClassFlag>;
diff --git a/deps/v8/src/torque/cpp-builder.cc b/deps/v8/src/torque/cpp-builder.cc
index 425fae920e..f48ca562e9 100644
--- a/deps/v8/src/torque/cpp-builder.cc
+++ b/deps/v8/src/torque/cpp-builder.cc
@@ -14,6 +14,8 @@ void Function::PrintDeclarationHeader(std::ostream& stream,
if (!description_.empty()) {
stream << std::string(indentation, ' ') << "// " << description_ << "\n";
}
+ stream << std::string(indentation, ' ') << "// " << PositionAsString(pos_)
+ << "\n";
stream << std::string(indentation, ' ');
if (IsExport()) stream << "V8_EXPORT_PRIVATE ";
if (IsV8Inline())
@@ -36,6 +38,9 @@ void Function::PrintDeclarationHeader(std::ostream& stream,
}
void Function::PrintDeclaration(std::ostream& stream, int indentation) const {
+ if (indentation == kAutomaticIndentation) {
+ indentation = owning_class_ ? 2 : 0;
+ }
PrintDeclarationHeader(stream, indentation);
stream << ";\n";
}
@@ -63,6 +68,8 @@ void Function::PrintInlineDefinition(
void Function::PrintBeginDefinition(std::ostream& stream,
int indentation) const {
+ stream << std::string(indentation, ' ') << "// " << PositionAsString(pos_)
+ << "\n";
std::string scope;
if (owning_class_) {
scope = owning_class_->GetName();
@@ -116,7 +123,7 @@ void File::BeginIncludeGuard(const std::string& name) {
s() << "#ifndef " << name
<< "\n"
"#define "
- << name << "\n";
+ << name << "\n\n";
}
void File::EndIncludeGuard(const std::string& name) {
diff --git a/deps/v8/src/torque/cpp-builder.h b/deps/v8/src/torque/cpp-builder.h
index f741ae6ded..428862c4e2 100644
--- a/deps/v8/src/torque/cpp-builder.h
+++ b/deps/v8/src/torque/cpp-builder.h
@@ -71,9 +71,13 @@ class Function {
};
explicit Function(std::string name)
- : owning_class_(nullptr), name_(std::move(name)) {}
+ : pos_(CurrentSourcePosition::Get()),
+ owning_class_(nullptr),
+ name_(std::move(name)) {}
Function(Class* owning_class, std::string name)
- : owning_class_(owning_class), name_(std::move(name)) {}
+ : pos_(CurrentSourcePosition::Get()),
+ owning_class_(owning_class),
+ name_(std::move(name)) {}
~Function() = default;
static Function DefaultGetter(std::string return_type, Class* owner,
@@ -145,13 +149,15 @@ class Function {
return names;
}
- void PrintDeclaration(std::ostream& stream, int indentation = 0) const;
+ static constexpr int kAutomaticIndentation = -1;
+ void PrintDeclaration(std::ostream& stream,
+ int indentation = kAutomaticIndentation) const;
void PrintDefinition(std::ostream& stream,
const std::function<void(std::ostream&)>& builder,
int indentation = 0) const;
void PrintInlineDefinition(std::ostream& stream,
const std::function<void(std::ostream&)>& builder,
- int indentation = 0) const;
+ int indentation = 2) const;
void PrintBeginDefinition(std::ostream& stream, int indentation = 0) const;
void PrintEndDefinition(std::ostream& stream, int indentation = 0) const;
@@ -159,6 +165,7 @@ class Function {
void PrintDeclarationHeader(std::ostream& stream, int indentation) const;
private:
+ SourcePosition pos_;
Class* owning_class_;
std::string description_;
std::string name_;
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index 5443e812fd..adc86486c7 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -157,7 +157,6 @@ std::vector<std::string> CSAGenerator::ProcessArgumentsCommon(
std::vector<std::string> args;
for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
const Type* type = *it;
- VisitResult arg;
if (type->IsConstexpr()) {
args.push_back(std::move(constexpr_arguments.back()));
constexpr_arguments.pop_back();
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 479a1249b3..10b0d09daf 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -82,13 +82,12 @@ std::ostream& operator<<(std::ostream& os, const GenericCallable& g) {
}
SpecializationRequester::SpecializationRequester(SourcePosition position,
- Scope* scope, std::string name)
+ Scope* s, std::string name)
: position(position), name(std::move(name)) {
// Skip scopes that are not related to template specializations, they might be
// stack-allocated and not live for long enough.
- while (scope && scope->GetSpecializationRequester().IsNone())
- scope = scope->ParentScope();
- this->scope = scope;
+ while (s && s->GetSpecializationRequester().IsNone()) s = s->ParentScope();
+ this->scope = s;
}
std::vector<Declarable*> Scope::Lookup(const QualifiedName& name) {
@@ -165,11 +164,11 @@ TypeArgumentInference GenericCallable::InferSpecializationTypes(
}
base::Optional<Statement*> GenericCallable::CallableBody() {
- if (auto* decl = TorqueMacroDeclaration::DynamicCast(declaration())) {
- return decl->body;
- } else if (auto* decl =
+ if (auto* macro_decl = TorqueMacroDeclaration::DynamicCast(declaration())) {
+ return macro_decl->body;
+ } else if (auto* builtin_decl =
TorqueBuiltinDeclaration::DynamicCast(declaration())) {
- return decl->body;
+ return builtin_decl->body;
} else {
return base::nullopt;
}
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 71cde50963..7e46ce59c2 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -5,6 +5,7 @@
#include "src/torque/declaration-visitor.h"
#include "src/torque/ast.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/server-data.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
@@ -109,16 +110,20 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Error("Builtins cannot have return type void.");
}
- return Declarations::CreateBuiltin(std::move(external_name),
- std::move(readable_name), kind,
- std::move(signature), body);
+ Builtin* builtin = Declarations::CreateBuiltin(std::move(external_name),
+ std::move(readable_name), kind,
+ std::move(signature), body);
+ // TODO(v8:12261): Recheck this.
+ // builtin->SetIdentifierPosition(decl->name->pos);
+ return builtin;
}
void DeclarationVisitor::Visit(ExternalBuiltinDeclaration* decl) {
- Declarations::Declare(
- decl->name->value,
+ Builtin* builtin =
CreateBuiltin(decl, decl->name->value, decl->name->value,
- TypeVisitor::MakeSignature(decl), base::nullopt));
+ TypeVisitor::MakeSignature(decl), base::nullopt);
+ builtin->SetIdentifierPosition(decl->name->pos);
+ Declarations::Declare(decl->name->value, builtin);
}
void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
@@ -152,29 +157,43 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl) {
}
}
- Declarations::DeclareRuntimeFunction(decl->name->value, signature);
+ RuntimeFunction* function =
+ Declarations::DeclareRuntimeFunction(decl->name->value, signature);
+ function->SetIdentifierPosition(decl->name->pos);
+ function->SetPosition(decl->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddFunctionDefinition(function);
+ }
}
void DeclarationVisitor::Visit(ExternalMacroDeclaration* decl) {
- Declarations::DeclareMacro(
+ Macro* macro = Declarations::DeclareMacro(
decl->name->value, true, decl->external_assembler_name,
TypeVisitor::MakeSignature(decl), base::nullopt, decl->op);
+ macro->SetIdentifierPosition(decl->name->pos);
+ macro->SetPosition(decl->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddFunctionDefinition(macro);
+ }
}
void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl) {
- Declarations::Declare(
- decl->name->value,
- CreateBuiltin(decl, decl->name->value, decl->name->value,
- TypeVisitor::MakeSignature(decl), decl->body));
+ auto builtin = CreateBuiltin(decl, decl->name->value, decl->name->value,
+ TypeVisitor::MakeSignature(decl), decl->body);
+ builtin->SetIdentifierPosition(decl->name->pos);
+ builtin->SetPosition(decl->pos);
+ Declarations::Declare(decl->name->value, builtin);
}
void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl) {
Macro* macro = Declarations::DeclareMacro(
decl->name->value, decl->export_to_csa, base::nullopt,
TypeVisitor::MakeSignature(decl), decl->body, decl->op);
- // TODO(szuend): Set identifier_position to decl->name->pos once all callable
- // names are changed from std::string to Identifier*.
+ macro->SetIdentifierPosition(decl->name->pos);
macro->SetPosition(decl->pos);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddFunctionDefinition(macro);
+ }
}
void DeclarationVisitor::Visit(IntrinsicDeclaration* decl) {
@@ -183,8 +202,11 @@ void DeclarationVisitor::Visit(IntrinsicDeclaration* decl) {
}
void DeclarationVisitor::Visit(ConstDeclaration* decl) {
- Declarations::DeclareNamespaceConstant(
+ auto constant = Declarations::DeclareNamespaceConstant(
decl->name, TypeVisitor::ComputeType(decl->type), decl->expression);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantDefinition(constant);
+ }
}
void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
@@ -260,7 +282,11 @@ void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
ReportError(stream.str());
}
- Declarations::DeclareExternConstant(decl->name, type, decl->literal);
+ ExternConstant* constant =
+ Declarations::DeclareExternConstant(decl->name, type, decl->literal);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantDefinition(constant);
+ }
}
void DeclarationVisitor::Visit(CppIncludeDeclaration* decl) {
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 1e1c89da86..2ff3680bcf 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -277,11 +277,12 @@ RuntimeFunction* Declarations::DeclareRuntimeFunction(
new RuntimeFunction(name, signature))));
}
-void Declarations::DeclareExternConstant(Identifier* name, const Type* type,
- std::string value) {
+ExternConstant* Declarations::DeclareExternConstant(Identifier* name,
+ const Type* type,
+ std::string value) {
CheckAlreadyDeclared<Value>(name->value, "constant");
- Declare(name->value, std::unique_ptr<ExternConstant>(
- new ExternConstant(name, type, value)));
+ return Declare(name->value, std::unique_ptr<ExternConstant>(
+ new ExternConstant(name, type, value)));
}
NamespaceConstant* Declarations::DeclareNamespaceConstant(Identifier* name,
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index d417e45ca2..739c021fec 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -132,8 +132,9 @@ class Declarations {
static RuntimeFunction* DeclareRuntimeFunction(const std::string& name,
const Signature& signature);
- static void DeclareExternConstant(Identifier* name, const Type* type,
- std::string value);
+ static ExternConstant* DeclareExternConstant(Identifier* name,
+ const Type* type,
+ std::string value);
static NamespaceConstant* DeclareNamespaceConstant(Identifier* name,
const Type* type,
Expression* body);
diff --git a/deps/v8/src/torque/earley-parser.cc b/deps/v8/src/torque/earley-parser.cc
index 9ebb132c82..7326996c70 100644
--- a/deps/v8/src/torque/earley-parser.cc
+++ b/deps/v8/src/torque/earley-parser.cc
@@ -18,11 +18,12 @@ namespace torque {
namespace {
struct LineAndColumnTracker {
- LineAndColumn previous{0, 0};
- LineAndColumn current{0, 0};
+ LineAndColumn previous{0, 0, 0};
+ LineAndColumn current{0, 0, 0};
void Advance(InputPosition from, InputPosition to) {
previous = current;
+ current.offset += std::distance(from, to);
while (from != to) {
if (*from == '\n') {
current.line += 1;
@@ -187,7 +188,8 @@ const Item* RunEarleyAlgorithm(
// Worklist for items at the next position.
std::vector<Item> future_items;
CurrentSourcePosition::Scope source_position(
- SourcePosition{CurrentSourceFile::Get(), {0, 0}, {0, 0}});
+ SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
+ LineAndColumn::Invalid()});
std::vector<const Item*> completed_items;
std::unordered_map<std::pair<size_t, Symbol*>, std::set<const Item*>,
base::hash<std::pair<size_t, Symbol*>>>
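For context, a minimal sketch of what the extended tracker computes, assuming InputPosition is the const char* cursor the parser already uses; the concrete values are illustrative only:

  LineAndColumnTracker tracker;
  const char* text = "ab\ncd";
  // Advance() now bumps the byte offset by std::distance(from, to) in one
  // step, while line and column are still updated character by character.
  tracker.Advance(text, text + 5);
  // tracker.current  == {offset: 5, line: 1, column: 2}
  // tracker.previous == {offset: 0, line: 0, column: 0}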
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
index a70e8ec41f..b3372f7542 100644
--- a/deps/v8/src/torque/global-context.cc
+++ b/deps/v8/src/torque/global-context.cc
@@ -13,12 +13,14 @@ DEFINE_CONTEXTUAL_VARIABLE(TargetArchitecture)
GlobalContext::GlobalContext(Ast ast)
: collect_language_server_data_(false),
+ collect_kythe_data_(false),
force_assert_statements_(false),
annotate_ir_(false),
ast_(std::move(ast)) {
CurrentScope::Scope current_scope(nullptr);
CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
+ SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
+ LineAndColumn::Invalid()});
default_namespace_ =
RegisterDeclarable(std::make_unique<Namespace>(kBaseNamespaceName));
}
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index b5b704d0b2..c0945e575a 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -49,6 +49,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static bool collect_language_server_data() {
return Get().collect_language_server_data_;
}
+ static void SetCollectKytheData() { Get().collect_kythe_data_ = true; }
+ static bool collect_kythe_data() { return Get().collect_kythe_data_; }
static void SetForceAssertStatements() {
Get().force_assert_statements_ = true;
}
@@ -118,6 +120,7 @@ class GlobalContext : public ContextualClass<GlobalContext> {
private:
bool collect_language_server_data_;
+ bool collect_kythe_data_;
bool force_assert_statements_;
bool annotate_ir_;
Namespace* default_namespace_;
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index dceb660c21..1a1bec711b 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -17,6 +17,7 @@
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
@@ -29,6 +30,8 @@ namespace v8 {
namespace internal {
namespace torque {
+uint64_t next_unique_binding_index = 0;
+
// Sadly, 'using std::string_literals::operator""s;' is bugged in MSVC (see
// https://developercommunity.visualstudio.com/t/Incorrect-warning-when-using-standard-st/673948).
// TODO(nicohartmann@): Change to 'using std::string_literals::operator""s;'
@@ -74,7 +77,7 @@ const Type* ImplementationVisitor::Visit(Statement* stmt) {
void ImplementationVisitor::BeginGeneratedFiles() {
std::set<SourceId> contains_class_definitions;
for (const ClassType* type : TypeOracle::GetClasses()) {
- if (type->GenerateCppClassDefinitions()) {
+ if (type->ShouldGenerateCppClassDefinitions()) {
contains_class_definitions.insert(type->AttributedToFile());
}
}
@@ -103,6 +106,7 @@ void ImplementationVisitor::BeginGeneratedFiles() {
file << "\n";
streams.csa_cc.BeginNamespace("v8", "internal");
+ streams.csa_ccfile << "\n";
}
// Output beginning of CSA .h file.
{
@@ -115,6 +119,7 @@ void ImplementationVisitor::BeginGeneratedFiles() {
file << "\n";
streams.csa_header.BeginNamespace("v8", "internal");
+ streams.csa_headerfile << "\n";
}
// Output beginning of class definition .cc file.
{
@@ -128,6 +133,7 @@ void ImplementationVisitor::BeginGeneratedFiles() {
}
streams.class_definition_cc.BeginNamespace("v8", "internal");
+ streams.class_definition_ccfile << "\n";
}
}
}
@@ -146,6 +152,7 @@ void ImplementationVisitor::EndGeneratedFiles() {
UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_CSA_H_";
streams.csa_header.EndNamespace("v8", "internal");
+ streams.csa_headerfile << "\n";
streams.csa_header.EndIncludeGuard(header_define);
}
@@ -291,24 +298,33 @@ VisitResult ImplementationVisitor::InlineMacro(
DCHECK(macro->IsMethod());
parameter_bindings.Add(kThisParameterName, LocalValue{*this_reference},
true);
+ // TODO(v8:12261): Tracking 'this'-binding for kythe led to a few weird
+ // issues. Review to fully support 'this' in methods.
}
- size_t i = 0;
+ size_t count = 0;
for (auto arg : arguments) {
- if (this_reference && i == signature.implicit_count) i++;
- const bool mark_as_used = signature.implicit_count > i;
- const Identifier* name = macro->parameter_names()[i++];
- parameter_bindings.Add(name,
- LocalValue{LocationReference::Temporary(
- arg, "parameter " + name->value)},
- mark_as_used);
+ if (this_reference && count == signature.implicit_count) count++;
+ const bool mark_as_used = signature.implicit_count > count;
+ const Identifier* name = macro->parameter_names()[count++];
+ Binding<LocalValue>* binding =
+ parameter_bindings.Add(name,
+ LocalValue{LocationReference::Temporary(
+ arg, "parameter " + name->value)},
+ mark_as_used);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingDefinition(binding);
+ }
}
DCHECK_EQ(label_blocks.size(), signature.labels.size());
for (size_t i = 0; i < signature.labels.size(); ++i) {
const LabelDeclaration& label_info = signature.labels[i];
- label_bindings.Add(label_info.name,
- LocalLabel{label_blocks[i], label_info.types});
+ Binding<LocalLabel>* binding = label_bindings.Add(
+ label_info.name, LocalLabel{label_blocks[i], label_info.types});
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingDefinition(binding);
+ }
}
Block* macro_end;
@@ -382,6 +398,7 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
cpp::Function f = GenerateMacroFunctionDeclaration(macro);
f.PrintDeclaration(csa_headerfile());
+ csa_headerfile() << "\n";
cpp::File csa_cc(csa_ccfile());
@@ -475,9 +492,9 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
const LabelDeclaration& label_info = signature.labels[i];
assembler().Bind(label_block);
std::vector<std::string> label_parameter_variables;
- for (size_t i = 0; i < label_info.types.size(); ++i) {
- LowerLabelParameter(label_info.types[i],
- ExternalLabelParameterName(label_info.name->value, i),
+ for (size_t j = 0; j < label_info.types.size(); ++j) {
+ LowerLabelParameter(label_info.types[j],
+ ExternalLabelParameterName(label_info.name->value, j),
&label_parameter_variables);
}
assembler().Emit(GotoExternalInstruction{
@@ -518,7 +535,6 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
f.PrintEndDefinition(csa_ccfile());
include_guard.reset();
- csa_ccfile() << "\n";
}
void ImplementationVisitor::Visit(TorqueMacro* macro) {
@@ -542,11 +558,14 @@ std::string AddParameter(size_t i, Builtin* builtin,
std::string external_name = "parameter" + std::to_string(i);
parameters->Push(external_name);
StackRange range = parameter_types->PushMany(LowerType(type));
- parameter_bindings->Add(
+ Binding<LocalValue>* binding = parameter_bindings->Add(
name,
LocalValue{LocationReference::Temporary(VisitResult(type, range),
"parameter " + name->value)},
mark_as_used);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingDefinition(binding);
+ }
return external_name;
}
@@ -588,9 +607,9 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
csa_ccfile() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
- "arguments_length, FrameArgumentsArgcType::"
- << (kJSArgcIncludesReceiver ? "kCountIncludesReceiver"
- : "kCountExcludesReceiver")
+ "arguments_length, (kJSArgcIncludesReceiver ? "
+ "FrameArgumentsArgcType::kCountIncludesReceiver : "
+ "FrameArgumentsArgcType::kCountExcludesReceiver)"
<< "));\n";
csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
@@ -741,12 +760,12 @@ const Type* ImplementationVisitor::Visit(
ReportError("constexpr variables need an initializer");
}
TypeVector lowered_types = LowerType(*type);
- for (const Type* type : lowered_types) {
+ for (const Type* t : lowered_types) {
assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
"uninitialized variable '" + stmt->name->value + "' of type " +
- type->ToString() + " originally defined at " +
+ t->ToString() + " originally defined at " +
PositionAsString(stmt->pos),
- type)});
+ t)});
}
init_result =
VisitResult(*type, assembler().TopRange(lowered_types.size()));
@@ -1005,6 +1024,9 @@ const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
LanguageServerData::AddDefinition(stmt->label->pos,
label->declaration_position());
}
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddBindingUse(stmt->label->pos, label);
+ }
size_t i = 0;
StackRange arguments = assembler().TopRange(0);
@@ -1196,7 +1218,7 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
{}});
return TypeOracle::GetVoidType();
}
- bool do_check = stmt->kind != AssertStatement::AssertKind::kAssert ||
+ bool do_check = stmt->kind != AssertStatement::AssertKind::kDcheck ||
GlobalContext::force_assert_statements();
#if defined(DEBUG)
do_check = true;
@@ -1210,15 +1232,15 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
assembler().Bind(unreachable_block);
}
- // CSA_ASSERT & co. are not used here on purpose for two reasons. First,
+ // CSA_DCHECK & co. are not used here on purpose for two reasons. First,
// Torque allows and handles two types of expressions in the if protocol
// automagically, ones that return TNode<BoolT> and those that use the
// BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
// handle this is embedded in the expression handling, it's not trivial to
- // decide up-front whether to use CSA_ASSERT or CSA_ASSERT_BRANCH.
+ // decide up-front whether to use CSA_DCHECK or CSA_DCHECK_BRANCH.
// Secondly, on failure, the assert text should be
// the corresponding Torque code, not the -gen.cc code, which would be the
- // case when using CSA_ASSERT_XXX.
+ // case when using CSA_DCHECK_XXX.
Block* true_block = assembler().NewBlock(assembler().CurrentStack());
Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
GenerateExpressionBranch(stmt->expression, true_block, false_block);
@@ -1911,9 +1933,9 @@ void FailCallableLookup(
stream << "\nfailed to instantiate all of these generic declarations:";
for (auto& failure : inapplicable_generics) {
GenericCallable* generic = failure.first;
- const std::string& reason = failure.second;
+ const std::string& fail_reason = failure.second;
stream << "\n " << generic->name() << " defined at "
- << generic->Position() << ":\n " << reason << "\n";
+ << generic->Position() << ":\n " << fail_reason << "\n";
}
}
ReportError(stream.str());
@@ -2230,6 +2252,9 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
LanguageServerData::AddDefinition(*pos, field.pos);
}
+ if (GlobalContext::collect_kythe_data() && pos.has_value()) {
+ KytheData::AddClassFieldUse(*pos, &field);
+ }
if (field.const_qualified) {
VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
return LocationReference::Temporary(
@@ -2326,6 +2351,9 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
if (GlobalContext::collect_language_server_data() && pos.has_value()) {
LanguageServerData::AddDefinition(*pos, field.pos);
}
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddClassFieldUse(*pos, &field);
+ }
return GenerateFieldReference(object_result, field, *class_type);
}
}
@@ -2367,6 +2395,13 @@ LocationReference ImplementationVisitor::GetLocationReference(
LanguageServerData::AddDefinition(expr->name->pos,
(*value)->declaration_position());
}
+ if (GlobalContext::collect_kythe_data()) {
+ if (!expr->IsThis()) {
+ DCHECK_EQ(expr->name->pos.end.column - expr->name->pos.start.column,
+ expr->name->value.length());
+ KytheData::AddBindingUse(expr->name->pos, *value);
+ }
+ }
if (expr->generic_arguments.size() != 0) {
ReportError("cannot have generic parameters on local name ",
expr->name);
@@ -2385,6 +2420,7 @@ LocationReference ImplementationVisitor::GetLocationReference(
LanguageServerData::AddDefinition(expr->name->pos,
(*builtin)->Position());
}
+ // TODO(v8:12261): Consider collecting KytheData here.
return LocationReference::Temporary(GetBuiltinCode(*builtin),
"builtin " + expr->name->value);
}
@@ -2411,6 +2447,9 @@ LocationReference ImplementationVisitor::GetLocationReference(
LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
}
if (auto* constant = NamespaceConstant::DynamicCast(value)) {
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantUse(expr->name->pos, constant);
+ }
if (constant->type()->IsConstexpr()) {
return LocationReference::Temporary(
VisitResult(constant->type(), constant->external_name() + "(state_)"),
@@ -2424,6 +2463,9 @@ LocationReference ImplementationVisitor::GetLocationReference(
"namespace constant " + expr->name->value);
}
ExternConstant* constant = ExternConstant::cast(value);
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddConstantUse(expr->name->pos, constant);
+ }
return LocationReference::Temporary(constant->value(),
"extern value " + expr->name->value);
}
@@ -3020,10 +3062,10 @@ VisitResult ImplementationVisitor::GenerateCall(
arguments_to_getter.parameters.begin(),
converted_arguments.begin() + 1, converted_arguments.end());
- Callable* callable = LookupCallable(
+ Callable* callable_macro = LookupCallable(
qualified_getter_name, Declarations::Lookup(qualified_getter_name),
arguments_to_getter, {});
- Macro* getter = Macro::DynamicCast(callable);
+ Macro* getter = Macro::DynamicCast(callable_macro);
if (!getter || getter->IsMethod()) {
ReportError(
"%MakeLazy expects a macro, not builtin or other type of callable");
@@ -3050,10 +3092,10 @@ VisitResult ImplementationVisitor::GenerateCall(
StackRange argument_range_for_getter = assembler().TopRange(0);
std::vector<std::string> constexpr_arguments_for_getter;
- size_t current = 0;
+ size_t arg_count = 0;
for (auto arg : arguments_to_getter.parameters) {
- DCHECK_LT(current, getter->signature().types().size());
- const Type* to_type = getter->signature().types()[current++];
+ DCHECK_LT(arg_count, getter->signature().types().size());
+ const Type* to_type = getter->signature().types()[arg_count++];
AddCallParameter(getter, arg, to_type, &converted_arguments_for_getter,
&argument_range_for_getter,
&constexpr_arguments_for_getter,
@@ -3069,10 +3111,20 @@ VisitResult ImplementationVisitor::GenerateCall(
const Type* type = specialization_types[0];
const ClassType* class_type = ClassType::DynamicCast(type);
if (!class_type) {
- ReportError("%FieldSlice must take a class type parameter");
+ ReportError("The first type parameter to %FieldSlice must be a class");
}
const Field& field =
class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
+ const Type* expected_slice_type =
+ field.const_qualified
+ ? TypeOracle::GetConstSliceType(field.name_and_type.type)
+ : TypeOracle::GetMutableSliceType(field.name_and_type.type);
+ const Type* declared_slice_type = specialization_types[1];
+ if (expected_slice_type != declared_slice_type) {
+ Error(
+ "The second type parameter to %FieldSlice must be the precise "
+ "slice type for the named field");
+ }
LocationReference ref = GenerateFieldReference(
VisitResult(type, argument_range), field, class_type,
/*treat_optional_as_indexed=*/true);
@@ -3135,6 +3187,12 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
LanguageServerData::AddDefinition(expr->callee->name->pos,
callable->IdentifierPosition());
}
+ if (GlobalContext::collect_kythe_data()) {
+ Callable* callable = LookupCallable(name, Declarations::Lookup(name),
+ arguments, specialization_types);
+ Callable* caller = CurrentCallable::Get();
+ KytheData::AddCall(caller, expr->callee->name->pos, callable);
+ }
if (expr->callee->name->value == "!" && arguments.parameters.size() == 1) {
PropagateBitfieldMark(expr->arguments[0], expr);
}
@@ -3178,6 +3236,10 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
LanguageServerData::AddDefinition(expr->method->name->pos,
callable->IdentifierPosition());
}
+ if (GlobalContext::collect_kythe_data()) {
+ Callable* caller = CurrentCallable::Get();
+ KytheData::AddCall(caller, expr->method->name->pos, callable);
+ }
return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}
@@ -3268,6 +3330,7 @@ std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
LanguageServerData::AddDefinition(name->pos,
label->declaration_position());
}
+ // TODO(v8:12261): Might have to track KytheData here.
}
return result;
}
@@ -3525,7 +3588,8 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
// count.
int parameter_count =
static_cast<int>(builtin->signature().ExplicitCount());
- builtin_definitions << ", " << JSParameterCount(parameter_count);
+ builtin_definitions << ", JSParameterCount(" << parameter_count
+ << ")";
// And the receiver is explicitly declared.
builtin_definitions << ", kReceiver";
for (size_t i = builtin->signature().implicit_count;
@@ -3547,7 +3611,8 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
Declarations::FindSomeInternalBuiltinWithType(type);
if (!example_builtin) {
CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
+ SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
+ LineAndColumn::Invalid()});
ReportError("unable to find any builtin with type \"", *type, "\"");
}
builtin_definitions << " V(" << type->function_pointer_type_id() << ","
@@ -3716,29 +3781,6 @@ class FieldOffsetsGenerator {
bool header_size_emitted_ = false;
};
-class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
- public:
- MacroFieldOffsetsGenerator(std::ostream& out, const ClassType* type)
- : FieldOffsetsGenerator(type), out_(out) {
- out_ << "#define ";
- out_ << "TORQUE_GENERATED_" << CapifyStringWithUnderscores(type_->name())
- << "_FIELDS(V) \\\n";
- }
- void WriteField(const Field& f, const std::string& size_string) override {
- out_ << "V(k" << CamelifyString(f.name_and_type.name) << "Offset, "
- << size_string << ") \\\n";
- }
- void WriteFieldOffsetGetter(const Field& f) override {
- // Can't do anything here.
- }
- void WriteMarker(const std::string& marker) override {
- out_ << "V(" << marker << ", 0) \\\n";
- }
-
- private:
- std::ostream& out_;
-};
-
void GenerateClassExport(const ClassType* type, std::ostream& header,
std::ostream& inl_header) {
const ClassType* super = type->GetSuperClass();
@@ -3756,27 +3798,13 @@ void GenerateClassExport(const ClassType* type, std::ostream& header,
} // namespace
-void ImplementationVisitor::GenerateClassFieldOffsets(
+void ImplementationVisitor::GenerateVisitorLists(
const std::string& output_directory) {
std::stringstream header;
- std::string file_name = "field-offsets.h";
+ std::string file_name = "visitor-lists.h";
{
IncludeGuardScope include_guard(header, file_name);
- for (const ClassType* type : TypeOracle::GetClasses()) {
- // TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
- // to generate field offsets without the use of macros.
- if (!type->GenerateCppClassDefinitions() && !type->HasUndefinedLayout()) {
- MacroFieldOffsetsGenerator g(header, type);
- for (auto f : type->fields()) {
- CurrentSourcePosition::Scope scope(f.pos);
- g.RecordOffsetFor(f);
- }
- g.Finish();
- header << "\n";
- }
- }
-
header << "#define TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(V)\\\n";
for (const ClassType* type : TypeOracle::GetClasses()) {
if (type->ShouldGenerateBodyDescriptor() && type->OwnInstanceType()) {
@@ -3867,7 +3895,9 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
previous_field_end_((parent && parent->IsShape()) ? "P::kSize"
: "P::kHeaderSize"),
gen_name_(gen_name) {}
+
void WriteField(const Field& f, const std::string& size_string) override {
+ hdr_ << " // " << PositionAsString(f.pos) << "\n";
std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
std::string field_end = field + "End";
hdr_ << " static constexpr int " << field << " = " << previous_field_end_
@@ -3876,6 +3906,7 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
<< size_string << " - 1;\n";
previous_field_end_ = field_end + " + 1";
}
+
void WriteFieldOffsetGetter(const Field& f) override {
// A static constexpr int is more convenient than a getter if the offset is
// known.
@@ -3928,6 +3959,8 @@ class CppClassGenerator {
void GenerateClass();
private:
+ SourcePosition Position();
+
void GenerateClassConstructors();
// Generates getter and setter runtime member functions for the given class
@@ -3996,18 +4029,21 @@ void CppClassGenerator::GenerateClass() {
f.AddParameter("HeapObject", "o");
f.PrintDeclaration(hdr_);
+ hdr_ << "\n";
f.PrintDefinition(impl_, [&](std::ostream& stream) {
- stream << " return o.Is" << name_ << "();";
+ stream << " return o.Is" << name_ << "();\n";
});
}
-
+ hdr_ << "// Definition " << PositionAsString(Position()) << "\n";
hdr_ << template_decl() << "\n";
hdr_ << "class " << gen_name_ << " : public P {\n";
- hdr_ << " static_assert(std::is_same<" << name_ << ", D>::value,\n"
- << " \"Use this class as direct base for " << name_ << ".\");\n";
- hdr_ << " static_assert(std::is_same<" << super_->name() << ", P>::value,\n"
- << " \"Pass in " << super_->name()
- << " as second template parameter for " << gen_name_ << ".\");\n";
+ hdr_ << " static_assert(\n"
+ << " std::is_same<" << name_ << ", D>::value,\n"
+ << " \"Use this class as direct base for " << name_ << ".\");\n";
+ hdr_ << " static_assert(\n"
+ << " std::is_same<" << super_->name() << ", P>::value,\n"
+ << " \"Pass in " << super_->name()
+ << " as second template parameter for " << gen_name_ << ".\");\n\n";
hdr_ << " public: \n";
hdr_ << " using Super = P;\n";
hdr_ << " using TorqueGeneratedClass = " << gen_name_ << "<D,P>;\n\n";
@@ -4015,6 +4051,7 @@ void CppClassGenerator::GenerateClass() {
hdr_ << " protected: // not extern or @export\n";
}
for (const Field& f : type_->fields()) {
+ CurrentSourcePosition::Scope scope(f.pos);
std::vector<const Field*> struct_fields;
GenerateFieldAccessors(f, struct_fields);
}
@@ -4029,7 +4066,7 @@ void CppClassGenerator::GenerateClass() {
cpp::Class c(std::move(templateArgs), gen_name_);
if (type_->ShouldGeneratePrint()) {
- hdr_ << "\n DECL_PRINTER(" << name_ << ")\n";
+ hdr_ << " DECL_PRINTER(" << name_ << ")\n\n";
}
if (type_->ShouldGenerateVerify()) {
@@ -4048,7 +4085,10 @@ void CppClassGenerator::GenerateClass() {
impl_ << " TorqueGeneratedClassVerifiers::" << name_ << "Verify(" << name_
<< "::cast(*this), "
"isolate);\n";
- impl_ << "}\n";
+ impl_ << "}\n\n";
+ }
+ if (type_->ShouldGenerateVerify()) {
+ impl_ << "\n";
}
hdr_ << "\n";
@@ -4075,7 +4115,7 @@ void CppClassGenerator::GenerateClass() {
{
cpp::Function f =
cpp::Function::DefaultGetter("int", &c, "AllocatedSize");
- f.PrintDeclaration(hdr_, 2);
+ f.PrintDeclaration(hdr_);
f.PrintDefinition(inl_, [&](std::ostream& stream) {
stream << " auto slice = "
@@ -4119,10 +4159,10 @@ void CppClassGenerator::GenerateClass() {
// V8_INLINE int32_t AllocatedSize() const
{
- cpp::Function f =
+ cpp::Function allocated_size_f =
cpp::Function::DefaultGetter("int32_t", &c, "AllocatedSize");
- f.SetFlag(cpp::Function::kV8Inline);
- f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
+ allocated_size_f.SetFlag(cpp::Function::kV8Inline);
+ allocated_size_f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
stream << " return SizeFor(";
bool first = true;
for (auto field : *index_fields) {
@@ -4150,7 +4190,7 @@ void CppClassGenerator::GenerateClass() {
// hand-written definition.
base::Optional<const ClassType*> parent = type_->parent()->ClassSupertype();
while (parent) {
- if ((*parent)->GenerateCppClassDefinitions() &&
+ if ((*parent)->ShouldGenerateCppClassDefinitions() &&
!(*parent)->ShouldGenerateFullClassDefinition() &&
(*parent)->AttributedToFile() == type_->AttributedToFile()) {
Error("Exported ", *type_,
@@ -4171,15 +4211,17 @@ void CppClassGenerator::GenerateClassCasts() {
// V8_INLINE static D cast(Object)
f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
- stream << " return D(object.ptr());\n";
+ stream << " return D(object.ptr());\n";
});
// V8_INLINE static D unchecked_cast(Object)
f.SetName("unchecked_cast");
f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
- stream << " return bit_cast<D>(object);\n";
+ stream << " return bit_cast<D>(object);\n";
});
}
+SourcePosition CppClassGenerator::Position() { return type_->GetPosition(); }
+
void CppClassGenerator::GenerateClassConstructors() {
const ClassType* typecheck_type = type_;
while (typecheck_type->IsShape()) {
@@ -4190,14 +4232,13 @@ void CppClassGenerator::GenerateClassConstructors() {
DCHECK(typecheck_type);
}
- hdr_ << " public:\n";
hdr_ << " template <class DAlias = D>\n";
hdr_ << " constexpr " << gen_name_ << "() : P() {\n";
- hdr_ << " static_assert(std::is_base_of<" << gen_name_ << ", \n";
- hdr_ << " DAlias>::value,\n";
- hdr_ << " \"class " << gen_name_ << " should be used as direct base for "
- << name_ << ".\");\n";
- hdr_ << " }\n";
+ hdr_ << " static_assert(\n";
+ hdr_ << " std::is_base_of<" << gen_name_ << ", DAlias>::value,\n";
+ hdr_ << " \"class " << gen_name_
+ << " should be used as direct base for " << name_ << ".\");\n";
+ hdr_ << " }\n\n";
hdr_ << " protected:\n";
hdr_ << " inline explicit " << gen_name_ << "(Address ptr);\n";
@@ -4209,7 +4250,7 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
- inl_ << " : P(ptr) {\n";
+ inl_ << " : P(ptr) {\n";
inl_ << " SLOW_DCHECK(Is" << typecheck_type->name()
<< "_NonInline(*this));\n";
inl_ << "}\n";
@@ -4217,7 +4258,7 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_
<< "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
- inl_ << " : P(ptr, allow_smi) {\n";
+ inl_ << " : P(ptr, allow_smi) {\n";
inl_ << " SLOW_DCHECK("
<< "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
" && this->IsSmi()) || Is"
@@ -4275,6 +4316,14 @@ void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
}
os << " DCHECK_LT(" << index << ", " << length_expression << ");\n";
}
+
+bool CanGenerateFieldAccessors(const Type* field_type) {
+ // float64_or_hole should be treated like float64. For now, we don't need it.
+ // TODO(v8:10391) Generate accessors for external pointers.
+ return field_type != TypeOracle::GetVoidType() &&
+ field_type != TypeOracle::GetFloat64OrHoleType() &&
+ !field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType());
+}
} // namespace
// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
@@ -4283,12 +4332,7 @@ void CppClassGenerator::GenerateFieldAccessors(
const Field& innermost_field =
struct_fields.empty() ? class_field : *struct_fields.back();
const Type* field_type = innermost_field.name_and_type.type;
- if (field_type == TypeOracle::GetVoidType()) return;
-
- // float64_or_hole should be treated like float64. For now, we don't need it.
- if (field_type == TypeOracle::GetFloat64OrHoleType()) {
- return;
- }
+ if (!CanGenerateFieldAccessors(field_type)) return;
if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
struct_fields.resize(struct_fields.size() + 1);
@@ -4300,11 +4344,6 @@ void CppClassGenerator::GenerateFieldAccessors(
return;
}
- // TODO(v8:10391) Generate accessors for external pointers
- if (field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType())) {
- return;
- }
-
bool indexed = class_field.index && !class_field.index->optional;
std::string type_name = GetTypeNameForAccessor(innermost_field);
bool can_contain_heap_objects = CanContainHeapObjects(field_type);
@@ -4618,9 +4657,10 @@ void ImplementationVisitor::GenerateClassDefinitions(
// Emit forward declarations.
for (const ClassType* type : TypeOracle::GetClasses()) {
+ CurrentSourcePosition::Scope position_activator(type->GetPosition());
auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
std::ostream& header = streams.class_definition_headerfile;
- std::string name = type->GenerateCppClassDefinitions()
+ std::string name = type->ShouldGenerateCppClassDefinitions()
? type->name()
: type->GetGeneratedTNodeTypeName();
header << "class " << name << ";\n";
@@ -4628,12 +4668,13 @@ void ImplementationVisitor::GenerateClassDefinitions(
}
for (const ClassType* type : TypeOracle::GetClasses()) {
+ CurrentSourcePosition::Scope position_activator(type->GetPosition());
auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
std::ostream& header = streams.class_definition_headerfile;
std::ostream& inline_header = streams.class_definition_inline_headerfile;
std::ostream& implementation = streams.class_definition_ccfile;
- if (type->GenerateCppClassDefinitions()) {
+ if (type->ShouldGenerateCppClassDefinitions()) {
CppClassGenerator g(type, header, inline_header, implementation);
g.GenerateClass();
}
@@ -4729,6 +4770,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
}
for (const StructType* type : structs_used_in_classes) {
+ CurrentSourcePosition::Scope position_activator(type->GetPosition());
std::ostream& header =
GlobalContext::GeneratedPerFile(type->GetPosition().source)
.class_definition_headerfile;
@@ -4758,43 +4800,50 @@ void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
std::map<std::string, const AggregateType*> field_names;
for (const AggregateType* aggregate_type : hierarchy) {
for (const Field& f : aggregate_type->fields()) {
- if (f.name_and_type.name == "map") continue;
- if (!f.index.has_value()) {
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
- !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- impl << " os << \"\\n - " << f.name_and_type.name << ": \" << ";
- if (f.name_and_type.type->StructSupertype()) {
- // TODO(turbofan): Print struct fields too.
- impl << "\" <struct field printing still unimplemented>\";\n";
- } else {
- impl << "this->" << f.name_and_type.name;
- switch (f.read_synchronization) {
- case FieldSynchronization::kNone:
- impl << "();\n";
- break;
- case FieldSynchronization::kRelaxed:
- impl << "(kRelaxedLoad);\n";
- break;
- case FieldSynchronization::kAcquireRelease:
- impl << "(kAcquireLoad);\n";
- break;
- }
- }
+ if (f.name_and_type.name == "map" || f.index.has_value() ||
+ !CanGenerateFieldAccessors(f.name_and_type.type)) {
+ continue;
+ }
+ std::string getter = f.name_and_type.name;
+ if (aggregate_type != type) {
+ // We must call getters directly on the class that provided them,
+ // because a subclass could have hidden them.
+ getter = aggregate_type->name() + "::TorqueGeneratedClass::" + getter;
+ }
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
+ !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ impl << " os << \"\\n - " << f.name_and_type.name << ": \" << ";
+ if (f.name_and_type.type->StructSupertype()) {
+ // TODO(turbofan): Print struct fields too.
+ impl << "\" <struct field printing still unimplemented>\";\n";
} else {
- impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
- << "Brief(this->" << f.name_and_type.name;
+ impl << "this->" << getter;
switch (f.read_synchronization) {
case FieldSynchronization::kNone:
- impl << "());\n";
+ impl << "();\n";
break;
case FieldSynchronization::kRelaxed:
- impl << "(kRelaxedLoad));\n";
+ impl << "(kRelaxedLoad);\n";
break;
case FieldSynchronization::kAcquireRelease:
- impl << "(kAcquireLoad));\n";
+ impl << "(kAcquireLoad);\n";
break;
}
}
+ } else {
+ impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
+ << "Brief(this->" << getter;
+ switch (f.read_synchronization) {
+ case FieldSynchronization::kNone:
+ impl << "());\n";
+ break;
+ case FieldSynchronization::kRelaxed:
+ impl << "(kRelaxedLoad));\n";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ impl << "(kAcquireLoad));\n";
+ break;
+ }
}
}
}
@@ -4817,19 +4866,14 @@ void ImplementationVisitor::GeneratePrintDefinitions(
for (const ClassType* type : TypeOracle::GetClasses()) {
if (!type->ShouldGeneratePrint()) continue;
-
- if (type->GenerateCppClassDefinitions()) {
- const ClassType* super = type->GetSuperClass();
- std::string gen_name = "TorqueGenerated" + type->name();
- std::string gen_name_T =
- gen_name + "<" + type->name() + ", " + super->name() + ">";
- std::string template_decl = "template <>";
- GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
- template_decl);
- } else {
- GeneratePrintDefinitionsForClass(impl, type, type->name(), type->name(),
- "");
- }
+ DCHECK(type->ShouldGenerateCppClassDefinitions());
+ const ClassType* super = type->GetSuperClass();
+ std::string gen_name = "TorqueGenerated" + type->name();
+ std::string gen_name_T =
+ gen_name + "<" + type->name() + ", " + super->name() + ">";
+ std::string template_decl = "template <>";
+ GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
+ template_decl);
}
}
@@ -5056,7 +5100,6 @@ void GenerateClassFieldVerifier(const std::string& class_name,
const ClassType& class_type, const Field& f,
std::ostream& h_contents,
std::ostream& cc_contents) {
- if (!f.generate_verify) return;
const Type* field_type = f.name_and_type.type;
// We only verify tagged types, not raw numbers or pointers. Structs
@@ -5173,16 +5216,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
}
if (super_type) {
std::string super_name = super_type->name();
- if (super_name == "HeapObject") {
- // Special case: HeapObjectVerify checks the Map type and dispatches
- // to more specific types, so calling it here would cause infinite
- // recursion. We could consider moving that behavior into a
- // different method to make the contract of *Verify methods more
- // consistent, but for now we'll just avoid the bad case.
- cc_contents << " " << super_name << "Verify(o, isolate);\n";
- } else {
- cc_contents << " o." << super_name << "Verify(isolate);\n";
- }
+ cc_contents << " o." << super_name << "Verify(isolate);\n";
}
// Second, verify that this object is what it claims to be.
@@ -5293,6 +5327,7 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
for (auto& declarable : GlobalContext::AllDeclarables()) {
TorqueMacro* macro = TorqueMacro::DynamicCast(declarable.get());
if (!(macro && macro->IsExportedToCSA())) continue;
+ CurrentSourcePosition::Scope position_activator(macro->Position());
cpp::Class assembler("TorqueGeneratedExportedMacrosAssembler");
std::vector<std::string> generated_parameter_names;
@@ -5359,12 +5394,12 @@ void ImplementationVisitor::GenerateCSATypes(
}
h_contents << "\n std::tuple<";
bool first = true;
- for (const Type* type : LowerType(struct_type)) {
+ for (const Type* lowered_type : LowerType(struct_type)) {
if (!first) {
h_contents << ", ";
}
first = false;
- h_contents << type->GetGeneratedTypeName();
+ h_contents << lowered_type->GetGeneratedTypeName();
}
std::vector<std::string> all_fields;
for (auto& field : struct_type->fields()) {
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 0c9ac445c5..8ebb72cc2e 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -228,6 +228,8 @@ struct LayoutForInitialization {
VisitResult size;
};
+extern uint64_t next_unique_binding_index;
+
template <class T>
class Binding;
@@ -262,7 +264,8 @@ class Binding : public T {
name_(name),
previous_binding_(this),
used_(false),
- written_(false) {
+ written_(false),
+ unique_index_(next_unique_binding_index++) {
std::swap(previous_binding_, manager_->current_bindings_[name]);
}
template <class... Args>
@@ -300,6 +303,8 @@ class Binding : public T {
bool Written() const { return written_; }
void SetWritten() { written_ = true; }
+ uint64_t unique_index() const { return unique_index_; }
+
private:
bool SkipLintCheck() const { return name_.length() > 0 && name_[0] == '_'; }
@@ -309,26 +314,31 @@ class Binding : public T {
SourcePosition declaration_position_ = CurrentSourcePosition::Get();
bool used_;
bool written_;
+ uint64_t unique_index_;
};
template <class T>
class BlockBindings {
public:
explicit BlockBindings(BindingsManager<T>* manager) : manager_(manager) {}
- void Add(std::string name, T value, bool mark_as_used = false) {
+ Binding<T>* Add(std::string name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name);
auto binding =
std::make_unique<Binding<T>>(manager_, name, std::move(value));
+ Binding<T>* result = binding.get();
if (mark_as_used) binding->SetUsed();
bindings_.push_back(std::move(binding));
+ return result;
}
- void Add(const Identifier* name, T value, bool mark_as_used = false) {
+ Binding<T>* Add(const Identifier* name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name->value);
auto binding =
std::make_unique<Binding<T>>(manager_, name, std::move(value));
+ Binding<T>* result = binding.get();
if (mark_as_used) binding->SetUsed();
bindings_.push_back(std::move(binding));
+ return result;
}
std::vector<Binding<T>*> bindings() const {
@@ -433,7 +443,7 @@ class ImplementationVisitor {
public:
void GenerateBuiltinDefinitionsAndInterfaceDescriptors(
const std::string& output_directory);
- void GenerateClassFieldOffsets(const std::string& output_directory);
+ void GenerateVisitorLists(const std::string& output_directory);
void GenerateBitFields(const std::string& output_directory);
void GeneratePrintDefinitions(const std::string& output_directory);
void GenerateClassDefinitions(const std::string& output_directory);
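The Add() overloads now return the freshly created binding, and every binding carries a process-wide unique_index() drawn from next_unique_binding_index. A minimal sketch of why that matters, using a hypothetical BindingsManager<LocalValue> named manager and payloads v1 and v2:

  BlockBindings<LocalValue> outer(&manager);
  Binding<LocalValue>* a = outer.Add("x", v1);
  {
    BlockBindings<LocalValue> inner(&manager);
    Binding<LocalValue>* b = inner.Add("x", v2);  // shadows the outer "x"
    // a->unique_index() != b->unique_index(): each binding gets a fresh id,
    // so a side table keyed by the index (as KytheData's local_bindings_ is)
    // can tell the two apart even though they share a name, and the key stays
    // valid after the Binding object itself has gone out of scope.
  }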
diff --git a/deps/v8/src/torque/kythe-data.cc b/deps/v8/src/torque/kythe-data.cc
new file mode 100644
index 0000000000..4ef6c2910a
--- /dev/null
+++ b/deps/v8/src/torque/kythe-data.cc
@@ -0,0 +1,187 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/kythe-data.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DEFINE_CONTEXTUAL_VARIABLE(KytheData)
+
+namespace {
+
+KythePosition MakeKythePosition(const SourcePosition& pos) {
+ KythePosition p;
+ if (pos.source.IsValid()) {
+ p.file_path = SourceFileMap::PathFromV8Root(pos.source);
+ } else {
+ p.file_path = "UNKNOWN";
+ }
+ p.start_offset = pos.start.offset;
+ p.end_offset = pos.end.offset;
+ return p;
+}
+
+} // namespace
+
+// Constants
+kythe_entity_t KytheData::AddConstantDefinition(const Value* constant) {
+ DCHECK(constant->IsNamespaceConstant() || constant->IsExternConstant());
+ KytheData* that = &KytheData::Get();
+ // Check if we know the constant already.
+ auto it = that->constants_.find(constant);
+ if (it != that->constants_.end()) return it->second;
+
+ // Register this constant.
+ KythePosition pos = MakeKythePosition(constant->name()->pos);
+ kythe_entity_t constant_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Constant, constant->name()->value, pos);
+ that->constants_.insert(it, std::make_pair(constant, constant_id));
+ return constant_id;
+}
+
+void KytheData::AddConstantUse(SourcePosition use_position,
+ const Value* constant) {
+ DCHECK(constant->IsNamespaceConstant() || constant->IsExternConstant());
+ KytheData* that = &Get();
+ kythe_entity_t constant_id = AddConstantDefinition(constant);
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Constant, constant_id, use_pos);
+}
+
+// Callables
+kythe_entity_t KytheData::AddFunctionDefinition(Callable* callable) {
+ KytheData* that = &KytheData::Get();
+ // Check if we know the caller already.
+ auto it = that->callables_.find(callable);
+ if (it != that->callables_.end()) return it->second;
+
+ // Register this callable.
+ auto ident_pos = callable->IdentifierPosition();
+ kythe_entity_t callable_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Function, callable->ExternalName(),
+ MakeKythePosition(ident_pos));
+ that->callables_.insert(it, std::make_pair(callable, callable_id));
+ return callable_id;
+}
+
+void KytheData::AddCall(Callable* caller, SourcePosition call_position,
+ Callable* callee) {
+ if (!caller) return; // Ignore those for now.
+ DCHECK_NOT_NULL(caller);
+ DCHECK_NOT_NULL(callee);
+ KytheData* that = &Get();
+ if (call_position.source.IsValid()) {
+ kythe_entity_t caller_id = AddFunctionDefinition(caller);
+ kythe_entity_t callee_id = AddFunctionDefinition(callee);
+
+ KythePosition call_pos = MakeKythePosition(call_position);
+ that->consumer_->AddCall(KytheConsumer::Kind::Function, caller_id, call_pos,
+ callee_id);
+ }
+}
+
+// Class fields
+kythe_entity_t KytheData::AddClassFieldDefinition(const Field* field) {
+ DCHECK(field);
+ KytheData* that = &KytheData::Get();
+ // Check if we know that field already.
+ auto it = that->class_fields_.find(field);
+ if (it != that->class_fields_.end()) return it->second;
+ // Register this field.
+ KythePosition pos = MakeKythePosition(field->pos);
+ kythe_entity_t field_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::ClassField, field->name_and_type.name, pos);
+ that->class_fields_.insert(it, std::make_pair(field, field_id));
+ return field_id;
+}
+
+void KytheData::AddClassFieldUse(SourcePosition use_position,
+ const Field* field) {
+ DCHECK(field);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t field_id = AddClassFieldDefinition(field);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::ClassField, field_id, use_pos);
+}
+
+// Bindings
+kythe_entity_t KytheData::AddBindingDefinition(Binding<LocalValue>* binding) {
+ CHECK(binding);
+ const uint64_t binding_index = binding->unique_index();
+ return AddBindingDefinitionImpl(binding_index, binding->name(),
+ binding->declaration_position());
+}
+
+kythe_entity_t KytheData::AddBindingDefinition(Binding<LocalLabel>* binding) {
+ CHECK(binding);
+ const uint64_t binding_index = binding->unique_index();
+ return AddBindingDefinitionImpl(binding_index, binding->name(),
+ binding->declaration_position());
+}
+
+kythe_entity_t KytheData::AddBindingDefinitionImpl(
+ uint64_t binding_index, const std::string& name,
+ const SourcePosition& ident_pos) {
+ KytheData* that = &KytheData::Get();
+ // Check if we know the binding already.
+ auto it = that->local_bindings_.find(binding_index);
+ if (it != that->local_bindings_.end()) return it->second;
+ // Register this binding.
+ kythe_entity_t binding_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Variable, name, MakeKythePosition(ident_pos));
+ that->local_bindings_.insert(it, std::make_pair(binding_index, binding_id));
+ return binding_id;
+}
+
+void KytheData::AddBindingUse(SourcePosition use_position,
+ Binding<LocalValue>* binding) {
+ CHECK(binding);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t binding_id = AddBindingDefinition(binding);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Variable, binding_id, use_pos);
+}
+
+void KytheData::AddBindingUse(SourcePosition use_position,
+ Binding<LocalLabel>* binding) {
+ CHECK(binding);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t binding_id = AddBindingDefinition(binding);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Variable, binding_id, use_pos);
+}
+
+// Types
+kythe_entity_t KytheData::AddTypeDefinition(const Declarable* type_decl) {
+ CHECK(type_decl);
+ KytheData* that = &KytheData::Get();
+ // Check if we know that type already.
+ auto it = that->types_.find(type_decl);
+ if (it != that->types_.end()) return it->second;
+ // Register this type.
+ KythePosition pos = MakeKythePosition(type_decl->IdentifierPosition());
+ kythe_entity_t type_id = that->consumer_->AddDefinition(
+ KytheConsumer::Kind::Type, type_decl->type_name(), pos);
+ that->types_.insert(it, std::make_pair(type_decl, type_id));
+ return type_id;
+}
+
+void KytheData::AddTypeUse(SourcePosition use_position,
+ const Declarable* type_decl) {
+ CHECK(type_decl);
+ KytheData* that = &KytheData::Get();
+ kythe_entity_t type_id = AddTypeDefinition(type_decl);
+
+ KythePosition use_pos = MakeKythePosition(use_position);
+ that->consumer_->AddUse(KytheConsumer::Kind::Type, type_id, use_pos);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
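The Add*Definition helpers memoize per entity, so however many uses are recorded, the consumer receives exactly one definition per constant, callable, field, or binding. A brief sketch of the resulting call sequence for a hypothetical namespace constant c referenced twice:

  KytheData::AddConstantUse(use_pos_1, c);  // consumer sees AddDefinition, then AddUse
  KytheData::AddConstantUse(use_pos_2, c);  // consumer sees AddUse only; the id is cached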
diff --git a/deps/v8/src/torque/kythe-data.h b/deps/v8/src/torque/kythe-data.h
new file mode 100644
index 0000000000..ba18841949
--- /dev/null
+++ b/deps/v8/src/torque/kythe-data.h
@@ -0,0 +1,110 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_KYTHE_DATA_H_
+#define V8_TORQUE_KYTHE_DATA_H_
+
+#include <map>
+
+#include "src/torque/ast.h"
+#include "src/torque/contextual.h"
+#include "src/torque/global-context.h"
+#include "src/torque/implementation-visitor.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+struct KythePosition {
+ std::string file_path;
+ uint64_t start_offset;
+ uint64_t end_offset;
+};
+
+using kythe_entity_t = uint64_t;
+
+class KytheConsumer {
+ public:
+ enum class Kind {
+ Unspecified,
+ Constant,
+ Function,
+ ClassField,
+ Variable,
+ Type,
+ };
+
+ virtual ~KytheConsumer() = 0;
+
+ virtual kythe_entity_t AddDefinition(Kind kind, std::string name,
+ KythePosition pos) = 0;
+
+ virtual void AddUse(Kind kind, kythe_entity_t entity,
+ KythePosition use_pos) = 0;
+ virtual void AddCall(Kind kind, kythe_entity_t caller_entity,
+ KythePosition call_pos,
+ kythe_entity_t callee_entity) = 0;
+};
+inline KytheConsumer::~KytheConsumer() = default;
+
+class KytheData : public ContextualClass<KytheData> {
+ public:
+ KytheData() = default;
+
+ static void SetConsumer(KytheConsumer* consumer) {
+ Get().consumer_ = consumer;
+ }
+
+ // Constants
+ V8_EXPORT_PRIVATE static kythe_entity_t AddConstantDefinition(
+ const Value* constant);
+ V8_EXPORT_PRIVATE static void AddConstantUse(SourcePosition use_position,
+ const Value* constant);
+ // Callables
+ V8_EXPORT_PRIVATE static kythe_entity_t AddFunctionDefinition(
+ Callable* callable);
+ V8_EXPORT_PRIVATE static void AddCall(Callable* caller,
+ SourcePosition call_position,
+ Callable* callee);
+ // Class fields
+ V8_EXPORT_PRIVATE static kythe_entity_t AddClassFieldDefinition(
+ const Field* field);
+ V8_EXPORT_PRIVATE static void AddClassFieldUse(SourcePosition use_position,
+ const Field* field);
+ // Bindings
+ V8_EXPORT_PRIVATE static kythe_entity_t AddBindingDefinition(
+ Binding<LocalValue>* binding);
+ V8_EXPORT_PRIVATE static kythe_entity_t AddBindingDefinition(
+ Binding<LocalLabel>* binding);
+ V8_EXPORT_PRIVATE static void AddBindingUse(SourcePosition use_position,
+ Binding<LocalValue>* binding);
+ V8_EXPORT_PRIVATE static void AddBindingUse(SourcePosition use_position,
+ Binding<LocalLabel>* binding);
+
+ // Types
+ V8_EXPORT_PRIVATE static kythe_entity_t AddTypeDefinition(
+ const Declarable* type_decl);
+ V8_EXPORT_PRIVATE static void AddTypeUse(SourcePosition use_position,
+ const Declarable* type_decl);
+
+ private:
+ static kythe_entity_t AddBindingDefinitionImpl(
+ uint64_t binding_index, const std::string& name,
+ const SourcePosition& ident_pos);
+
+ KytheConsumer* consumer_;
+ std::unordered_map<const Value*, kythe_entity_t> constants_;
+ std::unordered_map<Callable*, kythe_entity_t> callables_;
+
+ std::unordered_map<const Field*, std::set<SourcePosition>> field_uses_;
+ std::unordered_map<uint64_t, kythe_entity_t> local_bindings_;
+ std::unordered_map<const Declarable*, kythe_entity_t> types_;
+ std::unordered_map<const Field*, kythe_entity_t> class_fields_;
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_KYTHE_DATA_H_
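For context, a minimal sketch of a consumer an indexing tool could plug into this interface; only the overridden signatures come from the header above, while the sequential ids and the logging format are illustrative:

  #include <iostream>
  #include "src/torque/kythe-data.h"

  // Inside namespace v8::internal::torque (or with the names qualified).
  class LoggingKytheConsumer : public KytheConsumer {
   public:
    kythe_entity_t AddDefinition(Kind kind, std::string name,
                                 KythePosition pos) override {
      kythe_entity_t id = next_entity_++;
      std::cout << "def #" << id << " " << name << " @ " << pos.file_path
                << ":" << pos.start_offset << "-" << pos.end_offset << "\n";
      return id;
    }
    void AddUse(Kind kind, kythe_entity_t entity,
                KythePosition use_pos) override {
      std::cout << "use of #" << entity << " @ " << use_pos.file_path << "\n";
    }
    void AddCall(Kind kind, kythe_entity_t caller_entity,
                 KythePosition call_pos,
                 kythe_entity_t callee_entity) override {
      std::cout << "call #" << caller_entity << " -> #" << callee_entity << "\n";
    }

   private:
    kythe_entity_t next_entity_ = 1;
  };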
diff --git a/deps/v8/src/torque/ls/message-handler.cc b/deps/v8/src/torque/ls/message-handler.cc
index e9f2224df7..66995c0c89 100644
--- a/deps/v8/src/torque/ls/message-handler.cc
+++ b/deps/v8/src/torque/ls/message-handler.cc
@@ -279,8 +279,9 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
return;
}
- LineAndColumn pos{request.params().position().line(),
- request.params().position().character()};
+ auto pos =
+ LineAndColumn::WithUnknownOffset(request.params().position().line(),
+ request.params().position().character());
if (auto maybe_definition = LanguageServerData::FindDefinition(id, pos)) {
SourcePosition definition = *maybe_definition;
@@ -311,22 +312,22 @@ void HandleDocumentSymbolRequest(DocumentSymbolRequest request,
DCHECK(symbol->IsUserDefined());
if (symbol->IsMacro()) {
Macro* macro = Macro::cast(symbol);
- SymbolInformation symbol = response.add_result();
- symbol.set_name(macro->ReadableName());
- symbol.set_kind(SymbolKind::kFunction);
- symbol.location().SetTo(macro->Position());
+ SymbolInformation info = response.add_result();
+ info.set_name(macro->ReadableName());
+ info.set_kind(SymbolKind::kFunction);
+ info.location().SetTo(macro->Position());
} else if (symbol->IsBuiltin()) {
Builtin* builtin = Builtin::cast(symbol);
- SymbolInformation symbol = response.add_result();
- symbol.set_name(builtin->ReadableName());
- symbol.set_kind(SymbolKind::kFunction);
- symbol.location().SetTo(builtin->Position());
+ SymbolInformation info = response.add_result();
+ info.set_name(builtin->ReadableName());
+ info.set_kind(SymbolKind::kFunction);
+ info.location().SetTo(builtin->Position());
} else if (symbol->IsGenericCallable()) {
GenericCallable* generic = GenericCallable::cast(symbol);
- SymbolInformation symbol = response.add_result();
- symbol.set_name(generic->name());
- symbol.set_kind(SymbolKind::kFunction);
- symbol.location().SetTo(generic->Position());
+ SymbolInformation info = response.add_result();
+ info.set_name(generic->name());
+ info.set_kind(SymbolKind::kFunction);
+ info.location().SetTo(generic->Position());
} else if (symbol->IsTypeAlias()) {
const Type* type = TypeAlias::cast(symbol)->type();
SymbolKind kind =
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index 857efa2226..f953417fd3 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -30,16 +30,27 @@ class SourceId {
};
struct LineAndColumn {
+ static constexpr int kUnknownOffset = -1;
+
+ int offset;
int line;
int column;
- static LineAndColumn Invalid() { return {-1, -1}; }
+ static LineAndColumn Invalid() { return {-1, -1, -1}; }
+ static LineAndColumn WithUnknownOffset(int line, int column) {
+ return {kUnknownOffset, line, column};
+ }
bool operator==(const LineAndColumn& other) const {
- return line == other.line && column == other.column;
+ if (offset == kUnknownOffset || other.offset == kUnknownOffset) {
+ return line == other.line && column == other.column;
+ }
+ DCHECK_EQ(offset == other.offset,
+ line == other.line && column == other.column);
+ return offset == other.offset;
}
bool operator!=(const LineAndColumn& other) const {
- return !(*this == other);
+ return !operator==(other);
}
};
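LineAndColumn now carries a byte offset next to line/column: positions produced by the lexer compare by offset, while positions coming from the language server protocol (which only knows line/column) are built via WithUnknownOffset and fall back to line/column comparison, which is what the message-handler change above relies on. A minimal standalone sketch of these semantics, not V8 code, just the comparison rule restated:

    #include <cassert>

    struct Pos {
      static constexpr int kUnknownOffset = -1;
      int offset, line, column;
      bool operator==(const Pos& other) const {
        // If either side has no offset, compare by line/column only.
        if (offset == kUnknownOffset || other.offset == kUnknownOffset)
          return line == other.line && column == other.column;
        return offset == other.offset;
      }
    };

    int main() {
      Pos from_lexer{120, 5, 10};                // offset known
      Pos from_lsp{Pos::kUnknownOffset, 5, 10};  // LSP requests carry only line/column
      assert(from_lexer == from_lsp);            // falls back to line/column
    }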
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index 64bad91cab..579494f845 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -50,6 +50,9 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
if (options.collect_language_server_data) {
GlobalContext::SetCollectLanguageServerData();
}
+ if (options.collect_kythe_data) {
+ GlobalContext::SetCollectKytheData();
+ }
if (options.force_assert_statements) {
GlobalContext::SetForceAssertStatements();
}
@@ -87,7 +90,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateBuiltinDefinitionsAndInterfaceDescriptors(
output_directory);
- implementation_visitor.GenerateClassFieldOffsets(output_directory);
+ implementation_visitor.GenerateVisitorLists(output_directory);
implementation_visitor.GenerateBitFields(output_directory);
implementation_visitor.GeneratePrintDefinitions(output_directory);
implementation_visitor.GenerateClassDefinitions(output_directory);
@@ -161,6 +164,38 @@ TorqueCompilerResult CompileTorque(std::vector<std::string> files,
return result;
}
+TorqueCompilerResult CompileTorqueForKythe(
+ std::vector<TorqueCompilationUnit> units, TorqueCompilerOptions options,
+ KytheConsumer* consumer) {
+ SourceFileMap::Scope source_map_scope(options.v8_root);
+ CurrentSourceFile::Scope unknown_source_file_scope(SourceId::Invalid());
+ CurrentAst::Scope ast_scope;
+ TorqueMessages::Scope messages_scope;
+ LanguageServerData::Scope server_data_scope;
+ KytheData::Scope kythe_scope;
+
+ KytheData::Get().SetConsumer(consumer);
+
+ TorqueCompilerResult result;
+ try {
+ for (const auto& unit : units) {
+ SourceId source_id = SourceFileMap::AddSource(unit.source_file_path);
+ CurrentSourceFile::Scope source_id_scope(source_id);
+ ParseTorque(unit.file_content);
+ }
+ CompileCurrentAst(options);
+ } catch (TorqueAbortCompilation&) {
+ // Do nothing. The relevant TorqueMessage is part of the
+ // TorqueMessages contextual.
+ }
+
+ result.source_file_map = SourceFileMap::Get();
+ result.language_server_data = std::move(LanguageServerData::Get());
+ result.messages = std::move(TorqueMessages::Get());
+
+ return result;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
index 32fa41ea9b..816e42f1da 100644
--- a/deps/v8/src/torque/torque-compiler.h
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -7,6 +7,7 @@
#include "src/torque/ast.h"
#include "src/torque/contextual.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
#include "src/torque/utils.h"
@@ -19,9 +20,10 @@ struct TorqueCompilerOptions {
std::string output_directory = "";
std::string v8_root = "";
bool collect_language_server_data = false;
+ bool collect_kythe_data = false;
- // assert(...) are only generated for debug builds. The provide
- // language server support for statements inside asserts, this flag
+ // dcheck(...) are only generated for debug builds. To provide
+ // language server support for statements inside dchecks, this flag
// can force generate them.
bool force_assert_statements = false;
@@ -52,10 +54,18 @@ struct TorqueCompilerResult {
std::vector<TorqueMessage> messages;
};
+struct TorqueCompilationUnit {
+ std::string source_file_path;
+ std::string file_content;
+};
+
V8_EXPORT_PRIVATE TorqueCompilerResult
CompileTorque(const std::string& source, TorqueCompilerOptions options);
TorqueCompilerResult CompileTorque(std::vector<std::string> files,
TorqueCompilerOptions options);
+V8_EXPORT_PRIVATE TorqueCompilerResult CompileTorqueForKythe(
+ std::vector<TorqueCompilationUnit> units, TorqueCompilerOptions options,
+ KytheConsumer* kythe_consumer);
} // namespace torque
} // namespace internal
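Unlike the path-based CompileTorque overload, CompileTorqueForKythe takes pre-read sources, so a Kythe indexer can hand over buffers it already holds together with the consumer that receives the indexing facts. A sketch of a driver under stated assumptions: ReadFile is a hypothetical helper, and the caller is assumed to own some KytheConsumer implementation; only the types declared in this header are real.

    void IndexOneFile(KytheConsumer* consumer) {
      std::vector<TorqueCompilationUnit> units;
      units.push_back({"src/builtins/array-join.tq",
                       ReadFile("src/builtins/array-join.tq")});  // ReadFile: hypothetical

      TorqueCompilerOptions options;
      options.v8_root = ".";
      options.collect_kythe_data = true;

      TorqueCompilerResult result =
          CompileTorqueForKythe(std::move(units), options, consumer);
      for (const TorqueMessage& message : result.messages) {
        // Surface diagnostics from the indexed sources here.
      }
    }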
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 8320b62337..542e3a42a4 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -32,7 +32,7 @@ struct ExpressionWithSource {
struct TypeswitchCase {
SourcePosition pos;
- base::Optional<std::string> name;
+ base::Optional<Identifier*> name;
TypeExpression* type;
Statement* block;
};
@@ -313,9 +313,10 @@ void CheckNotDeferredStatement(Statement* statement) {
TypeExpression* AddConstexpr(TypeExpression* type) {
BasicTypeExpression* basic = BasicTypeExpression::DynamicCast(type);
if (!basic) Error("Unsupported extends clause.").Throw();
- return MakeNode<BasicTypeExpression>(basic->namespace_qualification,
- CONSTEXPR_TYPE_PREFIX + basic->name,
- basic->generic_arguments);
+ return MakeNode<BasicTypeExpression>(
+ basic->namespace_qualification,
+ MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + basic->name->value),
+ basic->generic_arguments);
}
Expression* MakeCall(IdentifierExpression* callee,
@@ -327,7 +328,7 @@ Expression* MakeCall(IdentifierExpression* callee,
// All IdentifierExpressions are treated as label names and can be directly
 // used as label identifiers. All other statements in a call's otherwise
// must create intermediate Labels for the otherwise's statement code.
- size_t label_id = 0;
+ size_t label_id_count = 0;
std::vector<TryHandler*> temp_labels;
for (auto* statement : otherwise) {
if (auto* e = ExpressionStatement::DynamicCast(statement)) {
@@ -339,7 +340,7 @@ Expression* MakeCall(IdentifierExpression* callee,
continue;
}
}
- auto label_name = std::string("__label") + std::to_string(label_id++);
+ auto label_name = std::string("__label") + std::to_string(label_id_count++);
auto label_id = MakeNode<Identifier>(label_name);
label_id->pos = SourcePosition::Invalid();
labels.push_back(label_id);
@@ -382,12 +383,11 @@ base::Optional<ParseResult> MakeCall(ParseResultIterator* child_results) {
base::Optional<ParseResult> MakeMethodCall(ParseResultIterator* child_results) {
auto this_arg = child_results->NextAs<Expression*>();
- auto callee = child_results->NextAs<std::string>();
+ auto callee = child_results->NextAs<Identifier*>();
auto args = child_results->NextAs<std::vector<Expression*>>();
auto otherwise = child_results->NextAs<std::vector<Statement*>>();
- return ParseResult{
- MakeCall(MakeNode<IdentifierExpression>(MakeNode<Identifier>(callee)),
- this_arg, args, otherwise)};
+ return ParseResult{MakeCall(MakeNode<IdentifierExpression>(callee), this_arg,
+ args, otherwise)};
}
base::Optional<ParseResult> MakeNewExpression(
@@ -501,8 +501,8 @@ base::Optional<ParseResult> MakeAssertStatement(
auto kind_string = child_results->NextAs<Identifier*>()->value;
auto expr_with_source = child_results->NextAs<ExpressionWithSource>();
AssertStatement::AssertKind kind;
- if (kind_string == "assert") {
- kind = AssertStatement::AssertKind::kAssert;
+ if (kind_string == "dcheck") {
+ kind = AssertStatement::AssertKind::kDcheck;
} else if (kind_string == "check") {
kind = AssertStatement::AssertKind::kCheck;
} else if (kind_string == "static_assert") {
@@ -523,12 +523,6 @@ base::Optional<ParseResult> MakeDebugStatement(
return ParseResult{result};
}
-base::Optional<ParseResult> MakeVoidType(ParseResultIterator* child_results) {
- TypeExpression* result = MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, "void", std::vector<TypeExpression*>{});
- return ParseResult{result};
-}
-
base::Optional<ParseResult> MakeExternalMacro(
ParseResultIterator* child_results) {
auto transitioning = child_results->NextAs<bool>();
@@ -728,7 +722,7 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
constexpr_name, flags | AbstractTypeFlag::kConstexpr, constexpr_extends,
constexpr_generates);
constexpr_decl->pos = name->pos;
- Declaration* decl = constexpr_decl;
+ decl = constexpr_decl;
if (!generic_parameters.empty()) {
decl =
MakeNode<GenericTypeDeclaration>(generic_parameters, constexpr_decl);
@@ -887,8 +881,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
ParseResultIterator* child_results) {
AnnotationSet annotations(
child_results,
- {ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
- ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
+ {ANNOTATION_ABSTRACT, ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
ANNOTATION_DO_NOT_GENERATE_CPP_CLASS, ANNOTATION_CUSTOM_CPP_CLASS,
ANNOTATION_CUSTOM_MAP, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
ANNOTATION_EXPORT, ANNOTATION_DO_NOT_GENERATE_CAST,
@@ -897,10 +890,6 @@ base::Optional<ParseResult> MakeClassDeclaration(
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
ANNOTATION_INSTANCE_TYPE_VALUE});
ClassFlags flags = ClassFlag::kNone;
- bool generate_print = annotations.Contains(ANNOTATION_GENERATE_PRINT);
- if (generate_print) flags |= ClassFlag::kGeneratePrint;
- bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
- if (generate_verify) flags |= ClassFlag::kGenerateVerify;
if (annotations.Contains(ANNOTATION_ABSTRACT)) {
flags |= ClassFlag::kAbstract;
}
@@ -1009,16 +998,15 @@ base::Optional<ParseResult> MakeClassDeclaration(
(flags & ClassFlag::kIsShape) == 0) {
ParameterList parameters;
parameters.names.push_back(MakeNode<Identifier>("obj"));
- parameters.types.push_back(
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "HeapObject",
- std::vector<TypeExpression*>{}));
+ parameters.types.push_back(MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, MakeNode<Identifier>("HeapObject"),
+ std::vector<TypeExpression*>{}));
LabelAndTypesVector labels;
labels.push_back(LabelAndTypes{MakeNode<Identifier>("CastError"),
std::vector<TypeExpression*>{}});
- TypeExpression* class_type =
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, name->value,
- std::vector<TypeExpression*>{});
+ TypeExpression* class_type = MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, name, std::vector<TypeExpression*>{});
std::vector<std::string> namespace_qualification{
TORQUE_INTERNAL_NAMESPACE_STRING};
@@ -1043,9 +1031,8 @@ base::Optional<ParseResult> MakeClassDeclaration(
auto cast_body = MakeNode<ReturnStatement>(value);
std::vector<TypeExpression*> generic_parameters;
- generic_parameters.push_back(
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, name->value,
- std::vector<TypeExpression*>{}));
+ generic_parameters.push_back(MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, name, std::vector<TypeExpression*>{}));
Declaration* specialization = MakeNode<SpecializationDeclaration>(
false, MakeNode<Identifier>("Cast"), generic_parameters,
@@ -1194,7 +1181,8 @@ base::Optional<ParseResult> MakeBasicTypeExpression(
child_results->NextAs<std::vector<TypeExpression*>>();
TypeExpression* result = MakeNode<BasicTypeExpression>(
std::move(namespace_qualification),
- is_constexpr ? GetConstexprName(name) : std::move(name),
+ MakeNode<Identifier>(is_constexpr ? GetConstexprName(name)
+ : std::move(name)),
std::move(generic_arguments));
return ParseResult{result};
}
@@ -1217,7 +1205,8 @@ base::Optional<ParseResult> MakeReferenceTypeExpression(
std::vector<TypeExpression*> generic_arguments{referenced_type};
TypeExpression* result = MakeNode<BasicTypeExpression>(
namespace_qualification,
- is_const ? CONST_REFERENCE_TYPE_STRING : MUTABLE_REFERENCE_TYPE_STRING,
+ MakeNode<Identifier>(is_const ? CONST_REFERENCE_TYPE_STRING
+ : MUTABLE_REFERENCE_TYPE_STRING),
generic_arguments);
return ParseResult{result};
}
@@ -1315,7 +1304,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
base::nullopt);
TypeExpression* name_type_expression =
- MakeNode<BasicTypeExpression>(name_identifier->value);
+ MakeNode<BasicTypeExpression>(name_identifier);
name_type_expression->pos = name_identifier->pos;
std::vector<Declaration*> entry_decls;
@@ -1346,7 +1335,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
entry.type.value_or(*base_type_expression), base::nullopt));
auto entry_type = MakeNode<BasicTypeExpression>(
- std::vector<std::string>{name}, entry.name->value,
+ std::vector<std::string>{name}, entry.name,
std::vector<TypeExpression*>{});
if (union_type) {
union_type = MakeNode<UnionTypeExpression>(union_type, entry_type);
@@ -1374,7 +1363,7 @@ base::Optional<ParseResult> MakeEnumDeclaration(
Identifier* constexpr_type_identifier =
MakeNode<Identifier>(std::string(CONSTEXPR_TYPE_PREFIX) + name);
TypeExpression* constexpr_type_expression = MakeNode<BasicTypeExpression>(
- std::string(CONSTEXPR_TYPE_PREFIX) + name);
+ MakeNode<Identifier>(std::string(CONSTEXPR_TYPE_PREFIX) + name));
base::Optional<TypeExpression*> base_constexpr_type_expression =
base::nullopt;
if (base_type_expression) {
@@ -1390,8 +1379,9 @@ base::Optional<ParseResult> MakeEnumDeclaration(
Statement* fromconstexpr_body = nullptr;
if (generate_nonconstexpr) {
DCHECK(base_type_expression.has_value());
- type_expr = MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, name, std::vector<TypeExpression*>{});
+ type_expr = MakeNode<BasicTypeExpression>(std::vector<std::string>{},
+ MakeNode<Identifier>(name),
+ std::vector<TypeExpression*>{});
// return %RawDownCast<Enum>(%FromConstexpr<Base>(o)))
fromconstexpr_identifier = MakeNode<Identifier>("FromConstexpr");
@@ -1440,9 +1430,10 @@ base::Optional<ParseResult> MakeEnumDeclaration(
MakeNode<Identifier>("constexpr constant " + entry_name);
entry_decls.push_back(MakeNode<ExternConstDeclaration>(
constexpr_constant_name,
- MakeNode<BasicTypeExpression>(std::vector<std::string>{},
- entry_constexpr_type,
- std::vector<TypeExpression*>{}),
+ MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{},
+ MakeNode<Identifier>(entry_constexpr_type),
+ std::vector<TypeExpression*>{}),
constexpr_generates + "::" + entry_name));
entry_decls.push_back(MakeNode<ConstDeclaration>(
entry.name, *entry.type,
@@ -1461,9 +1452,10 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// }
entry_decls.push_back(MakeNode<ExternConstDeclaration>(
entry.name,
- MakeNode<BasicTypeExpression>(std::vector<std::string>{},
- entry_constexpr_type,
- std::vector<TypeExpression*>{}),
+ MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{},
+ MakeNode<Identifier>(entry_constexpr_type),
+ std::vector<TypeExpression*>{}),
constexpr_generates + "::" + entry_name));
}
@@ -1471,9 +1463,10 @@ base::Optional<ParseResult> MakeEnumDeclaration(
// : Enum::constexpr kEntry0): Enum
if (generate_nonconstexpr) {
TypeExpression* entry_constexpr_type_expr =
- MakeNode<BasicTypeExpression>(std::vector<std::string>{name},
- entry_constexpr_type,
- std::vector<TypeExpression*>{});
+ MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{name},
+ MakeNode<Identifier>(entry_constexpr_type),
+ std::vector<TypeExpression*>{});
ParameterList parameters;
parameters.names.push_back(fromconstexpr_parameter_identifier);
@@ -1498,7 +1491,7 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
ParseResultIterator* child_results) {
auto expression = child_results->NextAs<Expression*>();
auto cases = child_results->NextAs<std::vector<TypeswitchCase>>();
- CurrentSourcePosition::Scope current_source_position(
+ CurrentSourcePosition::Scope matched_input_current_source_position(
child_results->matched_input().pos);
// typeswitch (expression) case (x1 : T1) {
@@ -1556,10 +1549,11 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
} else {
case_block = current_block;
}
- std::string name = "__case_value";
+ Identifier* name =
+ cases[i].name ? *cases[i].name : MakeNode<Identifier>("__case_value");
if (cases[i].name) name = *cases[i].name;
- case_block->statements.push_back(MakeNode<VarDeclarationStatement>(
- true, MakeNode<Identifier>(name), cases[i].type, value));
+ case_block->statements.push_back(
+ MakeNode<VarDeclarationStatement>(true, name, cases[i].type, value));
case_block->statements.push_back(cases[i].block);
if (i < cases.size() - 1) {
BlockStatement* next_block = MakeNode<BlockStatement>();
@@ -1580,11 +1574,11 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
base::Optional<ParseResult> MakeTypeswitchCase(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<base::Optional<std::string>>();
+ auto name = child_results->NextAs<base::Optional<Identifier*>>();
auto type = child_results->NextAs<TypeExpression*>();
auto block = child_results->NextAs<Statement*>();
- return ParseResult{TypeswitchCase{child_results->matched_input().pos,
- std::move(name), type, block}};
+ return ParseResult{
+ TypeswitchCase{child_results->matched_input().pos, name, type, block}};
}
base::Optional<ParseResult> MakeWhileStatement(
@@ -1722,7 +1716,8 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
ParameterList parameters;
parameters.names.push_back(MakeNode<Identifier>(variable));
parameters.types.push_back(MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, "JSAny", std::vector<TypeExpression*>{}));
+ std::vector<std::string>{}, MakeNode<Identifier>("JSAny"),
+ std::vector<TypeExpression*>{}));
parameters.has_varargs = false;
TryHandler* result = MakeNode<TryHandler>(
TryHandler::HandlerKind::kCatch, MakeNode<Identifier>(kCatchLabelName),
@@ -1964,11 +1959,9 @@ base::Optional<ParseResult> MakeAnnotation(ParseResultIterator* child_results) {
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
AnnotationSet annotations(
child_results,
- {ANNOTATION_NO_VERIFIER, ANNOTATION_CPP_RELAXED_STORE,
- ANNOTATION_CPP_RELAXED_LOAD, ANNOTATION_CPP_RELEASE_STORE,
- ANNOTATION_CPP_ACQUIRE_LOAD},
+ {ANNOTATION_CPP_RELAXED_STORE, ANNOTATION_CPP_RELAXED_LOAD,
+ ANNOTATION_CPP_RELEASE_STORE, ANNOTATION_CPP_ACQUIRE_LOAD},
{ANNOTATION_IF, ANNOTATION_IFNOT});
- bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
FieldSynchronization write_synchronization = FieldSynchronization::kNone;
if (annotations.Contains(ANNOTATION_CPP_RELEASE_STORE)) {
write_synchronization = FieldSynchronization::kAcquireRelease;
@@ -2020,7 +2013,6 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
std::move(conditions),
weak,
const_qualified,
- generate_verify,
read_synchronization,
write_synchronization}};
}
@@ -2270,10 +2262,6 @@ struct TorqueGrammar : Grammar {
TryOrDefault<TypeList>(Sequence({Token("("), typeList, Token(")")}))},
MakeLabelAndTypes)};
- // Result: TypeExpression*
- Symbol optionalReturnType = {Rule({Token(":"), &type}),
- Rule({}, MakeVoidType)};
-
// Result: LabelAndTypesVector
Symbol* optionalLabelList{TryOrDefault<LabelAndTypesVector>(
Sequence({Token("labels"),
@@ -2364,10 +2352,9 @@ struct TorqueGrammar : Grammar {
{&identifierExpression, &argumentList, optionalOtherwise}, MakeCall)};
// Result: Expression*
- Symbol callMethodExpression = {
- Rule({&primaryExpression, Token("."), &identifier, &argumentList,
- optionalOtherwise},
- MakeMethodCall)};
+ Symbol callMethodExpression = {Rule(
+ {&primaryExpression, Token("."), &name, &argumentList, optionalOtherwise},
+ MakeMethodCall)};
// Result: NameAndExpression
Symbol namedExpression = {
@@ -2555,7 +2542,7 @@ struct TorqueGrammar : Grammar {
MakeTypeswitchStatement),
Rule({Token("try"), &block, List<TryHandler*>(&tryHandler)},
MakeTryLabelExpression),
- Rule({OneOf({"assert", "check", "static_assert"}), Token("("),
+ Rule({OneOf({"dcheck", "check", "static_assert"}), Token("("),
&expressionWithSource, Token(")"), Token(";")},
MakeAssertStatement),
Rule({Token("while"), Token("("), expression, Token(")"), &statement},
@@ -2569,7 +2556,7 @@ struct TorqueGrammar : Grammar {
// Result: TypeswitchCase
Symbol typeswitchCase = {
Rule({Token("case"), Token("("),
- Optional<std::string>(Sequence({&identifier, Token(":")})), &type,
+ Optional<Identifier*>(Sequence({&name, Token(":")})), &type,
Token(")"), Token(":"), &block},
MakeTypeswitchCase)};
@@ -2582,7 +2569,7 @@ struct TorqueGrammar : Grammar {
Symbol method = {Rule(
{CheckIf(Token("transitioning")),
Optional<std::string>(Sequence({Token("operator"), &externalString})),
- Token("macro"), &name, &parameterListNoVararg, &optionalReturnType,
+ Token("macro"), &name, &parameterListNoVararg, Token(":"), &type,
optionalLabelList, &block},
MakeMethodDeclaration)};
@@ -2629,7 +2616,7 @@ struct TorqueGrammar : Grammar {
AsSingletonVector<Declaration*, MakeTypeAliasDeclaration>()),
Rule({Token("intrinsic"), &intrinsicName,
TryOrDefault<GenericParameters>(&genericParameters),
- &parameterListNoVararg, &optionalReturnType, &optionalBody},
+ &parameterListNoVararg, Token(":"), &type, &optionalBody},
AsSingletonVector<Declaration*, MakeIntrinsicDeclaration>()),
Rule({Token("extern"), CheckIf(Token("transitioning")),
Optional<std::string>(
@@ -2637,33 +2624,33 @@ struct TorqueGrammar : Grammar {
Token("macro"),
Optional<std::string>(Sequence({&identifier, Token("::")})), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &typeListMaybeVarArgs, &optionalReturnType, optionalLabelList,
+ &typeListMaybeVarArgs, Token(":"), &type, optionalLabelList,
Token(";")},
AsSingletonVector<Declaration*, MakeExternalMacro>()),
Rule({Token("extern"), CheckIf(Token("transitioning")),
CheckIf(Token("javascript")), Token("builtin"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+ &typeListMaybeVarArgs, Token(":"), &type, Token(";")},
AsSingletonVector<Declaration*, MakeExternalBuiltin>()),
Rule({Token("extern"), CheckIf(Token("transitioning")), Token("runtime"),
- &name, &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+ &name, &typeListMaybeVarArgs, Token(":"), &type, Token(";")},
AsSingletonVector<Declaration*, MakeExternalRuntime>()),
Rule({annotations, CheckIf(Token("transitioning")),
Optional<std::string>(
Sequence({Token("operator"), &externalString})),
Token("macro"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &parameterListNoVararg, &optionalReturnType, optionalLabelList,
+ &parameterListNoVararg, Token(":"), &type, optionalLabelList,
&optionalBody},
AsSingletonVector<Declaration*, MakeTorqueMacroDeclaration>()),
Rule({CheckIf(Token("transitioning")), CheckIf(Token("javascript")),
Token("builtin"), &name,
TryOrDefault<GenericParameters>(&genericParameters),
- &parameterListAllowVararg, &optionalReturnType, &optionalBody},
+ &parameterListAllowVararg, Token(":"), &type, &optionalBody},
AsSingletonVector<Declaration*, MakeTorqueBuiltinDeclaration>()),
Rule({CheckIf(Token("transitioning")), &name,
&genericSpecializationTypeList, &parameterListAllowVararg,
- &optionalReturnType, optionalLabelList, &block},
+ Token(":"), &type, optionalLabelList, &block},
AsSingletonVector<Declaration*, MakeSpecializationDeclaration>()),
Rule({Token("#include"), &externalString},
AsSingletonVector<Declaration*, MakeCppIncludeDeclaration>()),
diff --git a/deps/v8/src/torque/type-inference.cc b/deps/v8/src/torque/type-inference.cc
index 612d9edb07..3cffa73ce9 100644
--- a/deps/v8/src/torque/type-inference.cc
+++ b/deps/v8/src/torque/type-inference.cc
@@ -61,7 +61,7 @@ void TypeArgumentInference::Match(TypeExpression* parameter,
BasicTypeExpression::DynamicCast(parameter)) {
// If the parameter is referring to one of the type parameters, substitute
if (basic->namespace_qualification.empty() && !basic->is_constexpr) {
- auto result = type_parameter_from_name_.find(basic->name);
+ auto result = type_parameter_from_name_.find(basic->name->value);
if (result != type_parameter_from_name_.end()) {
size_t type_parameter_index = result->second;
if (type_parameter_index < num_explicit_) {
@@ -92,7 +92,7 @@ void TypeArgumentInference::Match(TypeExpression* parameter,
void TypeArgumentInference::MatchGeneric(BasicTypeExpression* parameter,
const Type* argument_type) {
QualifiedName qualified_name{parameter->namespace_qualification,
- parameter->name};
+ parameter->name->value};
GenericType* generic_type =
Declarations::LookupUniqueGenericType(qualified_name);
auto& specialized_from = argument_type->GetSpecializedFrom();
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index 3b94d6a512..d7b107dbe3 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -7,6 +7,7 @@
#include "src/common/globals.h"
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
+#include "src/torque/kythe-data.h"
#include "src/torque/server-data.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-oracle.h"
@@ -117,7 +118,10 @@ void DeclareMethods(AggregateType* container_type,
signature.parameter_types.types.insert(
signature.parameter_types.types.begin() + signature.implicit_count,
container_type);
- Declarations::CreateMethod(container_type, method_name, signature, body);
+ Method* m = Declarations::CreateMethod(container_type, method_name,
+ signature, body);
+ m->SetPosition(method->pos);
+ m->SetIdentifierPosition(method->name->pos);
}
}
@@ -189,7 +193,7 @@ const StructType* TypeVisitor::ComputeType(
StructDeclaration* decl, MaybeSpecializationKey specialized_from) {
StructType* struct_type = TypeOracle::GetStructType(decl, specialized_from);
CurrentScope::Scope struct_namespace_scope(struct_type->nspace());
- CurrentSourcePosition::Scope position_activator(decl->pos);
+ CurrentSourcePosition::Scope decl_position_activator(decl->pos);
ResidueClass offset = 0;
for (auto& field : decl->fields) {
@@ -207,7 +211,6 @@ const StructType* TypeVisitor::ComputeType(
offset.SingleValue(),
false,
field.const_qualified,
- false,
FieldSynchronization::kNone,
FieldSynchronization::kNone};
auto optional_size = SizeOf(f.name_and_type.type);
@@ -315,7 +318,6 @@ const ClassType* TypeVisitor::ComputeType(
Error("non-external classes must have defined layouts");
}
}
- flags = flags | ClassFlag::kGeneratePrint | ClassFlag::kGenerateVerify;
}
if (!(flags & ClassFlag::kExtern) &&
(flags & ClassFlag::kHasSameInstanceTypeAsParent)) {
@@ -334,7 +336,8 @@ const ClassType* TypeVisitor::ComputeType(
const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
- QualifiedName qualified_name{basic->namespace_qualification, basic->name};
+ QualifiedName qualified_name{basic->namespace_qualification,
+ basic->name->value};
auto& args = basic->generic_arguments;
const Type* type;
SourcePosition pos = SourcePosition::Invalid();
@@ -343,12 +346,20 @@ const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
auto* alias = Declarations::LookupTypeAlias(qualified_name);
type = alias->type();
pos = alias->GetDeclarationPosition();
+ if (GlobalContext::collect_kythe_data()) {
+ if (alias->IsUserDefined()) {
+ KytheData::AddTypeUse(basic->name->pos, alias);
+ }
+ }
} else {
auto* generic_type =
Declarations::LookupUniqueGenericType(qualified_name);
type = TypeOracle::GetGenericTypeInstance(generic_type,
ComputeTypeVector(args));
pos = generic_type->declaration()->name->pos;
+ if (GlobalContext::collect_kythe_data()) {
+ KytheData::AddTypeUse(basic->name->pos, generic_type);
+ }
}
if (GlobalContext::collect_language_server_data()) {
@@ -429,7 +440,6 @@ void TypeVisitor::VisitClassFieldsAndMethods(
class_offset.SingleValue(),
field_expression.weak,
field_expression.const_qualified,
- field_expression.generate_verify,
field_expression.read_synchronization,
field_expression.write_synchronization});
ResidueClass field_size = std::get<0>(field.GetFieldSizeInformation());
@@ -482,7 +492,8 @@ const Type* TypeVisitor::ComputeTypeForStructExpression(
ReportError("expected basic type expression referring to struct");
}
- QualifiedName qualified_name{basic->namespace_qualification, basic->name};
+ QualifiedName qualified_name{basic->namespace_qualification,
+ basic->name->value};
base::Optional<GenericType*> maybe_generic_type =
Declarations::TryLookupGenericType(qualified_name);
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 5ea7fe73ca..9157268bcb 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -818,10 +818,10 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
// );
// }
//
- // If the field has an unknown offset, and the previous field is named p, and
- // an item in the previous field has size 4:
+ // If the field has an unknown offset, and the previous field is named p, is
+ // not const, and is of type PType with size 4:
// FieldSliceClassNameFieldName(o: ClassName) {
- // const previous = %FieldSlice<ClassName>(o, "p");
+ // const previous = %FieldSlice<ClassName, MutableSlice<PType>>(o, "p");
// return torque_internal::unsafe::New{Const,Mutable}Slice<FieldType>(
// /*object:*/ o,
// /*offset:*/ previous.offset + 4 * previous.length,
@@ -853,14 +853,21 @@ void ClassType::GenerateSliceAccessor(size_t field_index) {
const Field* previous = GetFieldPreceding(field_index);
DCHECK_NOT_NULL(previous);
- // %FieldSlice<ClassName>(o, "p")
+ const Type* previous_slice_type =
+ previous->const_qualified
+ ? TypeOracle::GetConstSliceType(previous->name_and_type.type)
+ : TypeOracle::GetMutableSliceType(previous->name_and_type.type);
+
+ // %FieldSlice<ClassName, MutableSlice<PType>>(o, "p")
Expression* previous_expression = MakeCallExpression(
- MakeIdentifierExpression({"torque_internal"}, "%FieldSlice",
- {MakeNode<PrecomputedTypeExpression>(this)}),
+ MakeIdentifierExpression(
+ {"torque_internal"}, "%FieldSlice",
+ {MakeNode<PrecomputedTypeExpression>(this),
+ MakeNode<PrecomputedTypeExpression>(previous_slice_type)}),
{parameter, MakeNode<StringLiteralExpression>(
StringLiteralQuote(previous->name_and_type.name))});
- // const previous = %FieldSlice<ClassName>(o, "p");
+ // const previous = %FieldSlice<ClassName, MutableSlice<PType>>(o, "p");
Statement* define_previous =
MakeConstDeclarationStatement("previous", previous_expression);
statements.push_back(define_previous);
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index e231fb9431..d14dfaf7b2 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -228,7 +228,6 @@ struct Field {
bool is_weak;
bool const_qualified;
- bool generate_verify;
FieldSynchronization read_synchronization;
FieldSynchronization write_synchronization;
};
@@ -670,12 +669,12 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
bool ShouldGeneratePrint() const {
- return !IsExtern() ||
- ((flags_ & ClassFlag::kGeneratePrint) && !HasUndefinedLayout());
+ return !IsExtern() || (ShouldGenerateCppClassDefinitions() &&
+ !IsAbstract() && !HasUndefinedLayout());
}
bool ShouldGenerateVerify() const {
- return !IsExtern() || ((flags_ & ClassFlag::kGenerateVerify) &&
- (!HasUndefinedLayout() && !IsShape()));
+ return !IsExtern() || (ShouldGenerateCppClassDefinitions() &&
+ !HasUndefinedLayout() && !IsShape());
}
bool ShouldGenerateBodyDescriptor() const {
return flags_ & ClassFlag::kGenerateBodyDescriptor ||
@@ -689,9 +688,8 @@ class ClassType final : public AggregateType {
bool HasSameInstanceTypeAsParent() const {
return flags_ & ClassFlag::kHasSameInstanceTypeAsParent;
}
- bool GenerateCppClassDefinitions() const {
- return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern() ||
- ShouldGenerateBodyDescriptor();
+ bool ShouldGenerateCppClassDefinitions() const {
+ return (flags_ & ClassFlag::kGenerateCppClassDefinitions) || !IsExtern();
}
bool ShouldGenerateFullClassDefinition() const {
return !IsExtern() && !(flags_ & ClassFlag::kCustomCppClass);
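With the @generatePrint/@noVerifier annotations removed from the parser, printer and verifier generation for extern classes now keys off whether C++ class definitions are generated at all. A free-function restatement of the two predicates over plain booleans, purely illustrative:

    bool ShouldGeneratePrint(bool is_extern, bool generates_cpp_class,
                             bool is_abstract, bool undefined_layout) {
      return !is_extern ||
             (generates_cpp_class && !is_abstract && !undefined_layout);
    }

    bool ShouldGenerateVerify(bool is_extern, bool generates_cpp_class,
                              bool undefined_layout, bool is_shape) {
      return !is_extern ||
             (generates_cpp_class && !undefined_layout && !is_shape);
    }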
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 327e1946c5..89633c9d3e 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -176,8 +176,8 @@ void PrintCommaSeparatedList(std::ostream& os, const T& list) {
struct BottomOffset {
size_t offset;
- BottomOffset& operator=(std::size_t offset) {
- this->offset = offset;
+ BottomOffset& operator=(std::size_t other_offset) {
+ this->offset = other_offset;
return *this;
}
BottomOffset& operator++() {
diff --git a/deps/v8/src/trap-handler/handler-inside-posix.cc b/deps/v8/src/trap-handler/handler-inside-posix.cc
index 173e0ba3cd..86e2fb8b8e 100644
--- a/deps/v8/src/trap-handler/handler-inside-posix.cc
+++ b/deps/v8/src/trap-handler/handler-inside-posix.cc
@@ -88,7 +88,7 @@ class UnmaskOobSignalScope {
#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
// This is the address where we continue on a failed "ProbeMemory". It's defined
-// in "handler-outside-simulators.cc".
+// in "handler-outside-simulator.cc".
extern "C" char v8_probe_memory_continuation[];
#endif // V8_TRAP_HANDLER_VIA_SIMULATOR
diff --git a/deps/v8/src/trap-handler/handler-inside-win.cc b/deps/v8/src/trap-handler/handler-inside-win.cc
index e5ce133a6b..fcccc78ee5 100644
--- a/deps/v8/src/trap-handler/handler-inside-win.cc
+++ b/deps/v8/src/trap-handler/handler-inside-win.cc
@@ -30,6 +30,10 @@
#include "src/trap-handler/trap-handler-internal.h"
#include "src/trap-handler/trap-handler.h"
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+#include "src/trap-handler/trap-handler-simulator.h"
+#endif
+
namespace v8 {
namespace internal {
namespace trap_handler {
@@ -49,6 +53,12 @@ struct TEB {
PVOID thread_local_storage_pointer;
};
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+// This is the address where we continue on a failed "ProbeMemory". It's defined
+// in "handler-outside-simulator.cc".
+extern "C" char v8_probe_memory_continuation[];
+#endif // V8_TRAP_HANDLER_VIA_SIMULATOR
+
bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
// VectoredExceptionHandlers need extreme caution. Do as little as possible
// to determine if the exception should be handled or not. Exceptions can be
@@ -71,17 +81,16 @@ bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
// need to run to initialize values may not have run yet, but that is not
// the case for any thread_locals used here).
TEB* pteb = reinterpret_cast<TEB*>(NtCurrentTeb());
- if (!pteb->thread_local_storage_pointer) {
- return false;
- }
+ if (!pteb->thread_local_storage_pointer) return false;
// Now safe to run more advanced logic, which may access thread_locals
// Ensure the faulting thread was actually running Wasm code.
- if (!IsThreadInWasm()) {
- return false;
- }
+ if (!IsThreadInWasm()) return false;
// Clear g_thread_in_wasm_code, primarily to protect against nested faults.
+ // The only path that resets the flag to true is if we find a landing pad (in
+ // which case this function returns true). Otherwise we leave the flag unset
+ // since we do not return to wasm code.
g_thread_in_wasm_code = false;
const EXCEPTION_RECORD* record = exception->ExceptionRecord;
@@ -89,17 +98,28 @@ bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) {
uintptr_t fault_addr = reinterpret_cast<uintptr_t>(record->ExceptionAddress);
uintptr_t landing_pad = 0;
- if (TryFindLandingPad(fault_addr, &landing_pad)) {
- exception->ContextRecord->Rip = landing_pad;
- // We will return to wasm code, so restore the g_thread_in_wasm_code flag.
- g_thread_in_wasm_code = true;
- return true;
- }
-
- // If we get here, it's not a recoverable wasm fault, so we go to the next
- // handler. Leave the g_thread_in_wasm_code flag unset since we do not return
- // to wasm code.
- return false;
+#ifdef V8_TRAP_HANDLER_VIA_SIMULATOR
+ // Only handle signals triggered by the load in {ProbeMemory}.
+ if (fault_addr != reinterpret_cast<uintptr_t>(&ProbeMemory)) return false;
+
+ // The simulated ip will be in the second parameter register (%rdx).
+ uintptr_t simulated_ip = exception->ContextRecord->Rdx;
+ if (!TryFindLandingPad(simulated_ip, &landing_pad)) return false;
+ TH_DCHECK(landing_pad != 0);
+
+ exception->ContextRecord->Rax = landing_pad;
+ // Continue at the memory probing continuation.
+ exception->ContextRecord->Rip =
+ reinterpret_cast<uintptr_t>(&v8_probe_memory_continuation);
+#else
+ if (!TryFindLandingPad(fault_addr, &landing_pad)) return false;
+
+ // Tell the caller to return to the landing pad.
+ exception->ContextRecord->Rip = landing_pad;
+#endif
+ // We will return to wasm code, so restore the g_thread_in_wasm_code flag.
+ g_thread_in_wasm_code = true;
+ return true;
}
LONG HandleWasmTrap(EXCEPTION_POINTERS* exception) {
diff --git a/deps/v8/src/trap-handler/handler-outside-simulator.cc b/deps/v8/src/trap-handler/handler-outside-simulator.cc
index cc1e20ee21..d59debe625 100644
--- a/deps/v8/src/trap-handler/handler-outside-simulator.cc
+++ b/deps/v8/src/trap-handler/handler-outside-simulator.cc
@@ -15,10 +15,14 @@
asm(
".globl " SYMBOL(ProbeMemory) " \n"
SYMBOL(ProbeMemory) ": \n"
- // First parameter (address) passed in %rdi.
- // The second parameter (pc) is unused here. It is read by the trap handler
- // instead.
+// First parameter (address) passed in %rdi on Linux/Mac, and %rcx on Windows.
+// The second parameter (pc) is unused here. It is read by the trap handler
+// instead.
+#if V8_OS_WIN
+ " movb (%rcx), %al \n"
+#else
" movb (%rdi), %al \n"
+#endif // V8_OS_WIN
// Return 0 on success.
" xorl %eax, %eax \n"
// Place an additional "ret" here instead of falling through to the one
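On the simulator configurations, the trap handler only recognizes faults raised by the single load inside ProbeMemory: the simulator calls it before emulating a wasm memory access, and on a fault the handler resumes execution at v8_probe_memory_continuation with the landing pad placed in %rax. A hypothetical caller-side sketch; the exact ProbeMemory signature is assumed here (it is declared in trap-handler-simulator.h, not shown in this diff):

    #include <cstdint>

    // Assumed declaration; defined in the assembly above.
    extern "C" uintptr_t ProbeMemory(uintptr_t address, uintptr_t pc);

    // Illustrative only, not the actual simulator code.
    void EmulatedLoad(uintptr_t address, uintptr_t simulated_wasm_pc) {
      uintptr_t landing_pad = ProbeMemory(address, simulated_wasm_pc);
      if (landing_pad == 0) {
        // Access is safe: perform the emulated load.
      } else {
        // Access faulted: the handler returned via v8_probe_memory_continuation
        // with the landing pad in %rax; redirect the simulated pc there
        // (wasm out-of-bounds trap).
      }
    }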
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 0b3a6e0a70..79ddf56653 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -25,8 +25,9 @@ namespace trap_handler {
// Arm64 (non-simulator) on Mac.
#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_ARM64 && V8_OS_MACOSX
#define V8_TRAP_HANDLER_SUPPORTED true
-// Arm64 simulator on x64 on Linux or Mac.
-#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && (V8_OS_LINUX || V8_OS_MACOSX)
+// Arm64 simulator on x64 on Linux, Mac, or Windows.
+#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && \
+ (V8_OS_LINUX || V8_OS_MACOSX || V8_OS_WIN)
#define V8_TRAP_HANDLER_VIA_SIMULATOR
#define V8_TRAP_HANDLER_SUPPORTED true
// Everything else is unsupported.
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index 349a79c385..5eb6a41f31 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -63,21 +63,12 @@ class PageAllocatorInitializer {
PageAllocator* page_allocator() const { return page_allocator_; }
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- PageAllocator* data_cage_page_allocator() const {
- return data_cage_page_allocator_;
- }
-#endif
-
void SetPageAllocatorForTesting(PageAllocator* allocator) {
page_allocator_ = allocator;
}
private:
PageAllocator* page_allocator_;
-#ifdef V8_VIRTUAL_MEMORY_CAGE
- PageAllocator* data_cage_page_allocator_;
-#endif
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
@@ -95,16 +86,14 @@ v8::PageAllocator* GetPlatformPageAllocator() {
}
#ifdef V8_VIRTUAL_MEMORY_CAGE
-// TODO(chromium:1218005) once we disallow disabling the cage, name this e.g.
-// "GetPlatformDataPageAllocator", and set it to the PlatformPageAllocator when
-// V8_VIRTUAL_MEMORY_CAGE is not defined. Then use that allocator whenever
-// allocating ArrayBuffer backing stores inside v8.
-v8::PageAllocator* GetPlatformDataCagePageAllocator() {
+v8::PageAllocator* GetVirtualMemoryCagePageAllocator() {
+ // TODO(chromium:1218005) remove this code once the cage is no longer
+ // optional.
if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
return GetPlatformPageAllocator();
} else {
CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
- return GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
+ return GetProcessWideVirtualMemoryCage()->page_allocator();
}
}
#endif
@@ -372,7 +361,6 @@ bool VirtualMemoryCage::InitReservation(
VirtualMemory(params.page_allocator, existing_reservation.begin(),
existing_reservation.size());
base_ = reservation_.address() + params.base_bias_size;
- reservation_is_owned_ = false;
} else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
@@ -454,7 +442,8 @@ bool VirtualMemoryCage::InitReservation(
params.page_size);
page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
params.page_allocator, allocatable_base, allocatable_size,
- params.page_size);
+ params.page_size,
+ base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
return true;
}
@@ -462,13 +451,7 @@ void VirtualMemoryCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
- if (reservation_is_owned_) {
- reservation_.Free();
- } else {
- // Reservation is owned by the Platform.
- DCHECK(V8_VIRTUAL_MEMORY_CAGE_BOOL);
- reservation_.Reset();
- }
+ reservation_.Free();
}
}
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index e28266ad43..7127b8efe8 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -103,11 +103,23 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
#ifdef V8_VIRTUAL_MEMORY_CAGE
-// Returns the platform data cage page allocator instance. Guaranteed to be a
-// valid pointer.
-V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformDataCagePageAllocator();
+// Returns the virtual memory cage page allocator instance for allocating pages
+// inside the virtual memory cage. Guaranteed to be a valid pointer.
+V8_EXPORT_PRIVATE v8::PageAllocator* GetVirtualMemoryCagePageAllocator();
#endif
+// Returns the appropriate page allocator to use for ArrayBuffer backing stores.
+// If the virtual memory cage is enabled, these must be allocated inside the
+// cage and so this will be the CagePageAllocator. Otherwise it will be the
+// PlatformPageAllocator.
+inline v8::PageAllocator* GetArrayBufferPageAllocator() {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ return GetVirtualMemoryCagePageAllocator();
+#else
+ return GetPlatformPageAllocator();
+#endif
+}
+
// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
@@ -374,11 +386,6 @@ class VirtualMemoryCage {
protected:
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
- // Whether this cage owns the virtual memory reservation and thus should
- // release it upon destruction. TODO(chromium:1218005) this is only needed
- // when V8_VIRTUAL_MEMORY_CAGE is enabled. Maybe we can remove this again e.g.
- // by merging this class and v8::VirtualMemoryCage in v8-platform.h.
- bool reservation_is_owned_ = true;
VirtualMemory reservation_;
};
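A sketch of how backing-store code can use the new helper without caring whether the virtual memory cage is compiled in; the PageAllocator calls are the standard v8-platform.h interface:

    // Allocate and release one page through the ArrayBuffer allocator.
    void AllocateOnePageForBackingStore() {
      v8::PageAllocator* allocator = GetArrayBufferPageAllocator();
      size_t page_size = allocator->AllocatePageSize();
      void* pages = allocator->AllocatePages(nullptr, page_size, page_size,
                                             v8::PageAllocator::kReadWrite);
      if (pages == nullptr) return;
      // ... hand the memory to a backing store ...
      allocator->FreePages(pages, page_size);
    }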
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 1d29ce72bc..e10a18a560 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -452,6 +452,13 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
LoadTaggedPointerField(dst, MemOperand{instance, offset});
}
+void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
+ int offset, ExternalPointerTag tag,
+ Register isolate_root) {
+ LoadExternalPointerField(dst, FieldMemOperand(instance, offset), tag,
+ isolate_root);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 5f92d50f6f..2d922d3b2e 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2728,7 +2728,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->cmov(zero, dst.gp(), tmp);
}
-template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -3133,17 +3133,11 @@ void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// Different register alias requirements depending on CpuFeatures supported:
- if (CpuFeatures::IsSupported(AVX)) {
- // 1. AVX, no requirements.
+ if (CpuFeatures::IsSupported(AVX) || CpuFeatures::IsSupported(SSE4_2)) {
+ // 1. AVX, or SSE4_2 no requirements (I64x2GtS takes care of aliasing).
I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- // 2. SSE4_2, dst == lhs.
- if (dst != lhs) {
- movaps(dst.fp(), lhs.fp());
- }
- I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
} else {
- // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ // 2. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
LiftoffRegister tmp = GetUnusedRegister(
RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
@@ -3863,19 +3857,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
- Movaps(tmp1.fp(), lhs.fp());
- Movaps(tmp2.fp(), rhs.fp());
- // Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), byte{32});
- Pmuludq(tmp1.fp(), tmp1.fp(), rhs.fp());
- // Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), byte{32});
- Pmuludq(tmp2.fp(), tmp2.fp(), lhs.fp());
- Paddq(tmp2.fp(), tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), tmp2.fp(), byte{32});
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
- this, dst, lhs, rhs);
- Paddq(dst.fp(), dst.fp(), tmp2.fp());
+ I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
@@ -3933,28 +3915,14 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
- Andps(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), dst.fp(), byte{1});
- Andps(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Absps(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Negps(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -4016,61 +3984,12 @@ void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- minps(liftoff::kScratchDoubleReg, dst.fp());
- minps(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- minps(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minps(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ F32x4Min(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxps(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- maxps(liftoff::kScratchDoubleReg, dst.fp());
- maxps(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- maxps(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxps(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorps(dst.fp(), liftoff::kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orps(liftoff::kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Psrld(dst.fp(), dst.fp(), byte{10});
- Andnps(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
+ F32x4Max(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4089,28 +4008,14 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Abspd(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{63});
- Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), dst.fp(), byte{63});
- Xorpd(dst.fp(), src.fp());
- }
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ Negpd(dst.fp(), src.fp(), tmp);
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
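The open-coded SIMD sequences removed above (abs/neg, i64x2.mul, f32x4.min/max) now delegate to shared macro-assembler helpers such as F32x4Min. The removed comments explain why min/max cannot be a single minps/maxps: the x86 instruction simply returns its second operand when an input is NaN or when comparing zeros of different sign, whereas wasm requires NaN propagation and -0 < +0, hence the run-in-both-orders-and-merge trick. A scalar model of the lane-wise semantics the shared helper must preserve, for illustration only:

    #include <cmath>
    #include <limits>

    // Wasm f32.min semantics per lane: NaNs propagate as quiet NaNs,
    // and -0 orders below +0.
    float WasmF32Min(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) {
        return std::numeric_limits<float>::quiet_NaN();
      }
      if (a == 0.0f && b == 0.0f) {
        return std::signbit(a) ? a : b;  // prefer -0 over +0
      }
      return a < b ? a : b;
    }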
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index c94c7ece9e..cea6c9361d 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -32,7 +32,9 @@ namespace wasm {
enum LiftoffCondition {
kEqual,
+ kEqualZero = kEqual, // When used in a unary operation.
kUnequal,
+ kNotEqualZero = kUnequal, // When used in a unary operation.
kSignedLessThan,
kSignedLessEqual,
kSignedGreaterThan,
@@ -43,8 +45,8 @@ enum LiftoffCondition {
kUnsignedGreaterEqual
};
-inline constexpr LiftoffCondition Negate(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
+inline constexpr LiftoffCondition Negate(LiftoffCondition cond) {
+ switch (cond) {
case kEqual:
return kUnequal;
case kUnequal:
@@ -68,6 +70,31 @@ inline constexpr LiftoffCondition Negate(LiftoffCondition liftoff_cond) {
}
}
+inline constexpr LiftoffCondition Flip(LiftoffCondition cond) {
+ switch (cond) {
+ case kEqual:
+ return kEqual;
+ case kUnequal:
+ return kUnequal;
+ case kSignedLessThan:
+ return kSignedGreaterThan;
+ case kSignedLessEqual:
+ return kSignedGreaterEqual;
+ case kSignedGreaterEqual:
+ return kSignedLessEqual;
+ case kSignedGreaterThan:
+ return kSignedLessThan;
+ case kUnsignedLessThan:
+ return kUnsignedGreaterThan;
+ case kUnsignedLessEqual:
+ return kUnsignedGreaterEqual;
+ case kUnsignedGreaterEqual:
+ return kUnsignedLessEqual;
+ case kUnsignedGreaterThan:
+ return kUnsignedLessThan;
+ }
+}
+
class LiftoffAssembler : public TurboAssembler {
public:
// Each slot in our stack frame currently has exactly 8 bytes.
@@ -668,6 +695,9 @@ class LiftoffAssembler : public TurboAssembler {
int size);
inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
int offset);
+ inline void LoadExternalPointer(Register dst, Register instance, int offset,
+ ExternalPointerTag tag,
+ Register isolate_root);
inline void SpillInstance(Register instance);
inline void ResetOSRTarget();
inline void FillInstanceInto(Register dst);
@@ -975,8 +1005,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
Register lhs, Register rhs = no_reg);
- inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
- Register lhs, int imm);
+ inline void emit_i32_cond_jumpi(LiftoffCondition, Label*, Register lhs,
+ int imm);
// Set {dst} to 1 if condition holds, 0 otherwise.
inline void emit_i32_eqz(Register dst, Register src);
inline void emit_i32_set_cond(LiftoffCondition, Register dst, Register lhs,
@@ -1506,6 +1536,10 @@ class LiftoffAssembler : public TurboAssembler {
private:
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
+ // Spill one or two fp registers to get a pair of adjacent fp registers.
+ LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
+
uint32_t num_locals_ = 0;
static constexpr uint32_t kInlineLocalKinds = 16;
union {
@@ -1521,10 +1555,6 @@ class LiftoffAssembler : public TurboAssembler {
int ool_spill_space_size_ = 0;
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
-
- V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
- // Spill one or two fp registers to get a pair of adjacent fp registers.
- LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
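Negate() and the new Flip() answer different questions: Negate gives the condition of the logical complement (jump to the false branch when the condition does not hold), while Flip gives the same relation with the operands swapped (used in JumpIfFalse below when the constant ends up on the left-hand side). A reduced standalone model of the distinction, with only four conditions:

    enum Cond { kLess, kGreaterEqual, kGreater, kLessEqual };

    constexpr Cond Negate(Cond c) {
      return c == kLess         ? kGreaterEqual
             : c == kGreaterEqual ? kLess
             : c == kGreater      ? kLessEqual
                                  : kGreater;
    }

    constexpr Cond Flip(Cond c) {
      return c == kLess      ? kGreater
             : c == kGreater   ? kLess
             : c == kLessEqual ? kGreaterEqual
                               : kLessEqual;
    }

    int main() {
      // "a < b" is false  <=>  "a >= b" is true.
      static_assert(Negate(kLess) == kGreaterEqual, "");
      // "a < b"  <=>  "b > a".
      static_assert(Flip(kLess) == kGreater, "");
    }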
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 65226ab408..fc5684f427 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -313,24 +313,11 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
#define LIST_FEATURE(name, ...) kFeature_##name,
constexpr WasmFeatures kExperimentalFeatures{
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
- constexpr WasmFeatures kStagedFeatures{
- FOREACH_WASM_STAGING_FEATURE_FLAG(LIST_FEATURE)};
#undef LIST_FEATURE
// Bailout is allowed if any experimental feature is enabled.
if (env->enabled_features.contains_any(kExperimentalFeatures)) return;
- // Staged features should be feature complete in Liftoff according to
- // https://v8.dev/docs/wasm-shipping-checklist. Some are not though. They are
- // listed here explicitly, with a bug assigned to each of them.
-
- // TODO(7581): Fully implement reftypes in Liftoff.
- STATIC_ASSERT(kStagedFeatures.has_reftypes());
- if (reason == kRefTypes) {
- DCHECK(env->enabled_features.has_reftypes());
- return;
- }
-
// Otherwise, bailout is not allowed.
FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
}
@@ -373,6 +360,29 @@ class LiftoffCompiler {
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
+ class MostlySmallValueKindSig : public Signature<ValueKind> {
+ public:
+ MostlySmallValueKindSig(Zone* zone, const FunctionSig* sig)
+ : Signature<ValueKind>(sig->return_count(), sig->parameter_count(),
+ MakeKinds(inline_storage_, zone, sig)) {}
+
+ private:
+ static constexpr size_t kInlineStorage = 8;
+
+ static ValueKind* MakeKinds(ValueKind* storage, Zone* zone,
+ const FunctionSig* sig) {
+ const size_t size = sig->parameter_count() + sig->return_count();
+ if (V8_UNLIKELY(size > kInlineStorage)) {
+ storage = zone->NewArray<ValueKind>(size);
+ }
+ std::transform(sig->all().begin(), sig->all().end(), storage,
+ [](ValueType type) { return type.kind(); });
+ return storage;
+ }
+
+ ValueKind inline_storage_[kInlineStorage];
+ };
+
// For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them.
struct SpilledRegistersForInspection : public ZoneObject {
@@ -800,7 +810,7 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(decoder, 0);
- if (FLAG_wasm_dynamic_tiering) {
+ if (env_->dynamic_tiering == DynamicTiering::kEnabled) {
// TODO(arobin): Avoid spilling registers unconditionally.
__ SpillAllRegisters();
CODE_COMMENT("dynamic tiering");
@@ -832,8 +842,8 @@ class LiftoffCompiler {
// Check if the number of calls is a power of 2.
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp());
- // Unary "unequal" means "different from zero".
- __ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
+ __ emit_cond_jump(kNotEqualZero, &no_tierup, kI32,
+ old_number_of_calls.gp());
TierUpFunction(decoder);
// After the runtime call, the instance cache register is clobbered (we
// reset it already in {SpillAllRegisters} above, but then we still access
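The tier-up check above ANDs the old and new call counts and only calls the runtime when the result is zero. Assuming the new count is the old count plus one (the increment itself sits outside this hunk), that is exactly a power-of-two test on the new count; a small worked example:

    #include <cstdint>

    // (old & (old + 1)) == 0 exactly when old has the form 2^k - 1,
    // i.e. when the new call count old + 1 is a power of two.
    constexpr bool NewCountIsPowerOfTwo(uint32_t old_count) {
      return (old_count & (old_count + 1)) == 0;
    }
    static_assert(NewCountIsPowerOfTwo(0));   // 1st call triggers
    static_assert(NewCountIsPowerOfTwo(7));   // 8th call triggers
    static_assert(!NewCountIsPowerOfTwo(5));  // 6th call does not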
@@ -1009,13 +1019,11 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize,
{});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &do_break, kI32, flag);
+ __ emit_cond_jump(kNotEqualZero, &do_break, kI32, flag);
// Check if we should stop on "script entry".
LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kI32, flag);
+ __ emit_cond_jump(kEqualZero, &no_break, kI32, flag);
__ bind(&do_break);
EmitBreakpoint(decoder);
@@ -1254,6 +1262,46 @@ class LiftoffCompiler {
}
}
+ void JumpIfFalse(FullDecoder* decoder, Label* false_dst) {
+ LiftoffCondition cond =
+ test_and_reset_outstanding_op(kExprI32Eqz) ? kNotEqualZero : kEqualZero;
+
+ if (!has_outstanding_op()) {
+ // Unary comparison.
+ Register value = __ PopToRegister().gp();
+ __ emit_cond_jump(cond, false_dst, kI32, value);
+ return;
+ }
+
+ // Binary comparison of i32 values.
+ cond = Negate(GetCompareCondition(outstanding_op_));
+ outstanding_op_ = kNoOutstandingOp;
+ LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
+ if (rhs_slot.is_const()) {
+ // Compare to a constant.
+ int32_t rhs_imm = rhs_slot.i32_const();
+ __ cache_state()->stack_state.pop_back();
+ Register lhs = __ PopToRegister().gp();
+ __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm);
+ return;
+ }
+
+ Register rhs = __ PopToRegister().gp();
+ LiftoffAssembler::VarState lhs_slot = __ cache_state()->stack_state.back();
+ if (lhs_slot.is_const()) {
+ // Compare a constant to an arbitrary value.
+ int32_t lhs_imm = lhs_slot.i32_const();
+ __ cache_state()->stack_state.pop_back();
+ // Flip the condition, because {lhs} and {rhs} are swapped.
+ __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm);
+ return;
+ }
+
+ // Compare two arbitrary values.
+ Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
+ __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs);
+ }
+
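JumpIfFalse branches when the fused i32 comparison does not hold, so it negates the condition; when the constant ends up as the left operand it additionally flips it. A minimal sketch of the difference between negating and flipping, with made-up enum values rather than Liftoff's:

    enum Cond { kLt, kLe, kGt, kGe, kEq, kNe };

    // Negation inverts the truth value: !(a < b) is a >= b.
    constexpr Cond Negate(Cond c) {
      switch (c) {
        case kLt: return kGe;
        case kLe: return kGt;
        case kGt: return kLe;
        case kGe: return kLt;
        case kEq: return kNe;
        case kNe: return kEq;
      }
      return kEq;
    }

    // Flipping swaps the operands: (a < b) is (b > a).
    constexpr Cond Flip(Cond c) {
      switch (c) {
        case kLt: return kGt;
        case kLe: return kGe;
        case kGt: return kLt;
        case kGe: return kLe;
        default: return c;  // == and != are symmetric
      }
    }

    static_assert(Negate(kLt) == kGe);
    static_assert(Flip(kLt) == kGt);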
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -1261,25 +1309,8 @@ class LiftoffCompiler {
// Allocate the else state.
if_block->else_state = std::make_unique<ElseState>();
- // Test the condition, jump to else if zero.
- Register value = __ PopToRegister().gp();
- if (!has_outstanding_op()) {
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
- } else if (outstanding_op_ == kExprI32Eqz) {
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, if_block->else_state->label.get(), kI32,
- value);
- outstanding_op_ = kNoOutstandingOp;
- } else {
- // Otherwise, it's an i32 compare opcode.
- LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
- Register rhs = value;
- Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, if_block->else_state->label.get(), kI32, lhs,
- rhs);
- outstanding_op_ = kNoOutstandingOp;
- }
+ // Test the condition on the value stack, jump to else if zero.
+ JumpIfFalse(decoder, if_block->else_state->label.get());
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
@@ -2313,7 +2344,7 @@ class LiftoffCompiler {
__ PushRegister(kind, value);
}
- void GlobalSet(FullDecoder* decoder, const Value& value,
+ void GlobalSet(FullDecoder* decoder, const Value&,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
@@ -2493,23 +2524,9 @@ class LiftoffCompiler {
}
Label cont_false;
- Register value = __ PopToRegister().gp();
- if (!has_outstanding_op()) {
- // Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &cont_false, kI32, value);
- } else if (outstanding_op_ == kExprI32Eqz) {
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &cont_false, kI32, value);
- outstanding_op_ = kNoOutstandingOp;
- } else {
- // Otherwise, it's an i32 compare opcode.
- LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
- Register rhs = value;
- Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, &cont_false, kI32, lhs, rhs);
- outstanding_op_ = kNoOutstandingOp;
- }
+ // Test the condition on the value stack, jump to {cont_false} if zero.
+ JumpIfFalse(decoder, &cont_false);
BrOrRet(decoder, depth, 0);
__ bind(&cont_false);
@@ -2693,8 +2710,7 @@ class LiftoffCompiler {
__ emit_u32_to_intptr(index_ptrsize, index_ptrsize);
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
- // Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, trap_label, kI32, index.high_gp());
+ __ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
@@ -2757,14 +2773,17 @@ class LiftoffCompiler {
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
- LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+ LiftoffRegList pinned;
+ if (index != no_reg) pinned.set(index);
// Get one register for computing the effective offset (offset + index).
LiftoffRegister effective_offset =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
DCHECK_GE(kMaxUInt32, offset);
__ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
- __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ if (index != no_reg) {
+ // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
+ __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
+ }
// Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -4157,8 +4176,9 @@ class LiftoffCompiler {
Load64BitExceptionValue(value, values_array, index, pinned);
break;
case kF64: {
- RegClass rc = reg_class_for(kI64);
- LiftoffRegister tmp_reg = pinned.set(__ GetUnusedRegister(rc, pinned));
+ RegClass rc_i64 = reg_class_for(kI64);
+ LiftoffRegister tmp_reg =
+ pinned.set(__ GetUnusedRegister(rc_i64, pinned));
Load64BitExceptionValue(tmp_reg, values_array, index, pinned);
__ emit_type_conversion(kExprF64ReinterpretI64, value, tmp_reg,
nullptr);
@@ -4877,7 +4897,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back(2);
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- __ SmiUntag(kReturnRegister0);
+ __ SmiToInt32(kReturnRegister0);
__ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
@@ -5165,7 +5185,50 @@ class LiftoffCompiler {
void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
- UNREACHABLE();
+ ValueKind rtt_kind = rtt.type.kind();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ // Allocate the array.
+ {
+ LiftoffAssembler::VarState rtt_var =
+ __ cache_state()->stack_state.end()[-1];
+
+ LiftoffRegList pinned;
+
+ LiftoffRegister elem_size_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(elem_size_reg, WasmValue(element_size_bytes(elem_kind)));
+ LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
+
+ LiftoffRegister length_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(length_reg,
+ WasmValue(static_cast<int32_t>(elements.size())));
+ LiftoffAssembler::VarState length_var(kI32, length_reg, 0);
+
+ CallRuntimeStub(WasmCode::kWasmAllocateArray_Uninitialized,
+ MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
+ {rtt_var, length_var, elem_size_var},
+ decoder->position());
+ // Drop the RTT.
+ __ DropValues(1);
+ }
+
+ // Initialize the array with stack arguments.
+ LiftoffRegister array(kReturnRegister0);
+ if (!CheckSupportedType(decoder, elem_kind, "array.init")) return;
+ for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(array);
+ LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister offset_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(offset_reg, WasmValue(i << element_size_log2(elem_kind)));
+ StoreObjectField(array.gp(), offset_reg.gp(),
+ wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
+ element, pinned, elem_kind);
+ }
+
+ // Push the array onto the stack.
+ __ PushRegister(kRef, array);
}
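ArrayInit stores the stack operands back to front, computing each element's offset as i << element_size_log2(elem_kind) past the tagged header (whose size, WasmArray::kHeaderSize, is not shown in this hunk). Worked numbers for that shift, as an illustration only:

    #include <cstdint>

    constexpr uint32_t ElementOffset(uint32_t i, uint32_t element_size_log2) {
      return i << element_size_log2;
    }
    static_assert(ElementOffset(5, 3) == 40);  // element 5 of an i64/f64 array
    static_assert(ElementOffset(5, 2) == 20);  // element 5 of an i32/f32 array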
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
@@ -5613,20 +5676,11 @@ class LiftoffCompiler {
}
private:
- ValueKindSig* MakeKindSig(Zone* zone, const FunctionSig* sig) {
- ValueKind* reps =
- zone->NewArray<ValueKind>(sig->parameter_count() + sig->return_count());
- ValueKind* ptr = reps;
- for (ValueType type : sig->all()) *ptr++ = type.kind();
- return zone->New<ValueKindSig>(sig->return_count(), sig->parameter_count(),
- reps);
- }
-
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -5656,7 +5710,7 @@ class LiftoffCompiler {
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref;
- __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -5666,12 +5720,12 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target);
- FinishCall(decoder, sig, call_descriptor);
+ __ CallIndirect(&sig, call_descriptor, target);
+ FinishCall(decoder, &sig, call_descriptor);
}
} else {
// A direct call within this module just gets the current instance.
- __ PrepareCall(sig, call_descriptor);
+ __ PrepareCall(&sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
if (tail_call) {
@@ -5685,7 +5739,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallNativeWasmCode(addr);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
}
@@ -5693,8 +5747,8 @@ class LiftoffCompiler {
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -5818,7 +5872,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch;
- __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -5828,16 +5882,16 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target);
+ __ CallIndirect(&sig, call_descriptor, target);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
void CallRef(FullDecoder* decoder, ValueType func_ref_type,
const FunctionSig* type_sig, TailCall tail_call) {
- ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
- for (ValueKind ret : sig->returns()) {
+ MostlySmallValueKindSig sig(compilation_zone_, type_sig);
+ for (ValueKind ret : sig.returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
compiler::CallDescriptor* call_descriptor =
@@ -5908,11 +5962,9 @@ class LiftoffCompiler {
#ifdef V8_HEAP_SANDBOX
LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
- __ LoadExternalPointerField(
- target.gp(),
- FieldOperand(func_data.gp(), WasmFunctionData::kForeignAddressOffset),
- kForeignForeignAddressTag, temp.gp(),
- TurboAssembler::IsolateRootLocation::kInScratchRegister);
+ __ LoadExternalPointer(target.gp(), func_data.gp(),
+ WasmFunctionData::kForeignAddressOffset,
+ kForeignForeignAddressTag, temp.gp());
#else
__ Load(
target, func_data.gp(), no_reg,
@@ -5942,7 +5994,7 @@ class LiftoffCompiler {
// is in {instance}.
Register target_reg = target.gp();
Register instance_reg = instance.gp();
- __ PrepareCall(sig, call_descriptor, &target_reg, &instance_reg);
+ __ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg);
if (tail_call) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->ParameterSlotCount()),
@@ -5952,9 +6004,9 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(sig, call_descriptor, target_reg);
+ __ CallIndirect(&sig, call_descriptor, target_reg);
- FinishCall(decoder, sig, call_descriptor);
+ FinishCall(decoder, &sig, call_descriptor);
}
}
@@ -6104,6 +6156,53 @@ class LiftoffCompiler {
tmp_s128, lane_kind);
}
+ bool has_outstanding_op() const {
+ return outstanding_op_ != kNoOutstandingOp;
+ }
+
+ bool test_and_reset_outstanding_op(WasmOpcode opcode) {
+ DCHECK_NE(kNoOutstandingOp, opcode);
+ if (outstanding_op_ != opcode) return false;
+ outstanding_op_ = kNoOutstandingOp;
+ return true;
+ }
+
+ void TraceCacheState(FullDecoder* decoder) const {
+ if (!FLAG_trace_liftoff) return;
+ StdoutStream os;
+ for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
+ --control_depth) {
+ auto* cache_state =
+ control_depth == -1 ? __ cache_state()
+ : &decoder->control_at(control_depth)
+ ->label_state;
+ os << PrintCollection(cache_state->stack_state);
+ if (control_depth != -1) PrintF("; ");
+ }
+ os << "\n";
+ }
+
+ void DefineSafepoint() {
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ __ cache_state()->DefineSafepoint(safepoint);
+ }
+
+ void DefineSafepointWithCalleeSavedRegisters() {
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
+ __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
+ }
+
+ Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ cache_state()->TrySetCachedInstanceRegister(
+ pinned | LiftoffRegList::ForRegs(fallback));
+ if (instance == no_reg) instance = fallback;
+ __ LoadInstanceFromFrame(instance);
+ }
+ return instance;
+ }
+
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
// MVP:
@@ -6166,46 +6265,6 @@ class LiftoffCompiler {
int32_t* max_steps_;
int32_t* nondeterminism_;
- bool has_outstanding_op() const {
- return outstanding_op_ != kNoOutstandingOp;
- }
-
- void TraceCacheState(FullDecoder* decoder) const {
- if (!FLAG_trace_liftoff) return;
- StdoutStream os;
- for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
- --control_depth) {
- auto* cache_state =
- control_depth == -1 ? __ cache_state()
- : &decoder->control_at(control_depth)
- ->label_state;
- os << PrintCollection(cache_state->stack_state);
- if (control_depth != -1) PrintF("; ");
- }
- os << "\n";
- }
-
- void DefineSafepoint() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
- __ cache_state()->DefineSafepoint(safepoint);
- }
-
- void DefineSafepointWithCalleeSavedRegisters() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
- __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
- }
-
- Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
- Register instance = __ cache_state()->cached_instance;
- if (instance == no_reg) {
- instance = __ cache_state()->TrySetCachedInstanceRegister(
- pinned | LiftoffRegList::ForRegs(fallback));
- if (instance == no_reg) instance = fallback;
- __ LoadInstanceFromFrame(instance);
- }
- return instance;
- }
-
DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
};
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index 63ac2acf8b..74eb10ca34 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -313,9 +313,9 @@ class LiftoffRegister {
}
private:
- storage_t code_;
-
explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
+
+ storage_t code_;
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegister);
@@ -467,10 +467,10 @@ class LiftoffRegList {
}
private:
- storage_t regs_ = 0;
-
// Unchecked constructor. Only use for valid bits.
explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
+
+ storage_t regs_ = 0;
};
ASSERT_TRIVIALLY_COPYABLE(LiftoffRegList);
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 617e193bd1..938fa41ea9 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -10,6 +10,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -41,7 +42,8 @@ namespace liftoff {
//
//
-constexpr int32_t kInstanceOffset = 3 * kSystemPointerSize;
+constexpr int32_t kInstanceOffset =
+ (FLAG_enable_embedded_constant_pool ? 3 : 2) * kSystemPointerSize;
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
@@ -130,28 +132,81 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset,
- SafepointTableBuilder*) {
- int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
-
-#ifdef USE_SIMULATOR
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int frame_size =
+ GetTotalFrameSize() -
+ (FLAG_enable_embedded_constant_pool ? 3 : 2) * kSystemPointerSize;
+
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + offset, kInstrSize + kGap));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ patching_assembler.addi(sp, sp, Operand(-frame_size));
return;
}
-#endif
- if (!is_int16(-frame_size)) {
- bailout(kOtherReason, "PPC subi overflow");
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int jump_offset = pc_offset() - offset;
+ if (!is_int26(jump_offset)) {
+ bailout(kUnsupportedArchitecture, "branch offset overflow");
return;
}
- Assembler patching_assembler(
- AssemblerOptions{},
- ExternalAssemblerBuffer(buffer_start_ + offset, kInstrSize + kGap));
- patching_assembler.addi(sp, sp, Operand(-frame_size));
+ patching_assembler.b(jump_offset, LeaveLK);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = ip;
+ LoadU64(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset),
+ r0);
+ LoadU64(stack_limit, MemOperand(stack_limit), r0);
+ AddS64(stack_limit, stack_limit, Operand(frame_size), r0);
+ CmpU64(sp, stack_limit);
+ bge(&continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+ SubS64(sp, sp, Operand(frame_size), r0);
+
+  // Jump back into the function prologue, from {pc_offset()} to right after
+  // the space reserved for the {__ sub(sp, sp, framesize)} (which is now a
+  // branch).
+ jump_offset = offset - pc_offset() + kInstrSize;
+ if (!is_int26(jump_offset)) {
+ bailout(kUnsupportedArchitecture, "branch offset overflow");
+ return;
+ }
+ b(jump_offset, LeaveLK);
}
void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
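For frames of 4 KB or more, the prologue instruction is patched into a jump to the out-of-line code above, which performs the stack check before allocating the frame. The comparison is arranged as sp >= limit + frame_size so nothing is subtracted from sp before the check; a scalar sketch of that test, with max_stack_size standing in for FLAG_stack_size * 1024:

    #include <cstdint>

    bool LargeFrameFits(uintptr_t sp, uintptr_t real_stack_limit,
                        uintptr_t frame_size, uintptr_t max_stack_size) {
      // A frame bigger than the whole stack can never fit; bail out before
      // the addition below could overflow.
      if (frame_size >= max_stack_size) return false;
      return sp >= real_stack_limit + frame_size;
    }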
@@ -431,43 +486,124 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
+ lwsync();
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ lwsync();
+ Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true);
+ sync();
}
+#ifdef V8_TARGET_BIG_ENDIAN
+constexpr bool is_be = true;
+#else
+constexpr bool is_be = false;
+#endif
+
+#define ATOMIC_OP(instr) \
+ { \
+ Register offset = r0; \
+ if (offset_imm != 0) { \
+ mov(ip, Operand(offset_imm)); \
+ if (offset_reg != no_reg) { \
+ add(ip, ip, offset_reg); \
+ } \
+ offset = ip; \
+ } else { \
+ if (offset_reg != no_reg) { \
+ offset = offset_reg; \
+ } \
+ } \
+ \
+ MemOperand dst = MemOperand(offset, dst_addr); \
+ \
+ switch (type.value()) { \
+ case StoreType::kI32Store8: \
+ case StoreType::kI64Store8: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ instr(dst, lhs, rhs); \
+ }; \
+ AtomicOps<uint8_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI32Store16: \
+ case StoreType::kI64Store16: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU16(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU16(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint16_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI32Store: \
+ case StoreType::kI64Store32: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU32(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU32(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint32_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI64Store: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU64(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU64(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint64_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ } \
+ }
+
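On big-endian targets the value sits in memory in wasm (little-endian) byte order, so each lambda byte-reverses the loaded word, applies the operation in native integer order, and reverses the result back before it is stored; the load-reserve/store-conditional retry loop lives in the AtomicOps helper, which is not part of this hunk. A standalone sketch of the reverse-operate-reverse step for 32-bit values:

    #include <cstdint>

    constexpr uint32_t ByteSwap32(uint32_t x) {
      return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
             ((x << 8) & 0x00FF0000u) | (x << 24);
    }

    // `loaded` is the raw word as read from memory (wasm byte order);
    // the arithmetic happens on the byte-swapped, native-order value.
    uint32_t ApplyOnBigEndian(uint32_t loaded, uint32_t operand,
                              uint32_t (*op)(uint32_t, uint32_t)) {
      return ByteSwap32(op(ByteSwap32(loaded), operand));
    }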
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ ATOMIC_OP(add);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ ATOMIC_OP(sub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ ATOMIC_OP(and_);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ ATOMIC_OP(orx);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ ATOMIC_OP(xor_);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
@@ -613,16 +749,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
switch (kind) {
case kI32:
case kF32:
- LoadU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
- StoreU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
+ LoadU32(ip, liftoff::GetStackSlot(src_offset + stack_bias), r0);
+ StoreU32(ip, liftoff::GetStackSlot(dst_offset + stack_bias), r0);
break;
case kI64:
case kOptRef:
case kRef:
case kRtt:
case kF64:
- LoadU64(ip, liftoff::GetStackSlot(dst_offset), r0);
- StoreU64(ip, liftoff::GetStackSlot(src_offset), r0);
+ LoadU64(ip, liftoff::GetStackSlot(src_offset), r0);
+ StoreU64(ip, liftoff::GetStackSlot(dst_offset), r0);
break;
case kS128:
bailout(kSimd, "simd op");
@@ -786,10 +922,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
#define UNOP_LIST(V) \
- V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
- void) \
- V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
- void) \
+ V(f32_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f32_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f32_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, , \
void) \
V(f32_floor, frim, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
@@ -838,89 +972,89 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
// return_val, return_type)
-#define BINOP_LIST(V) \
- V(f32_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
- , ROUND_F64_TO_F32, , void) \
- V(f64_copysign, fcpsgn, DoubleRegister, DoubleRegister, DoubleRegister, , , \
- , USE, , void) \
- V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
- V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
- V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
- V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
- V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
- V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
- V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
- V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
- LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
- V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
- LFR_TO_REG, Operand, USE, , void) \
- V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
- INT32_AND_WITH_1F, USE, , void) \
- V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
- REGISTER_AND_WITH_1F, USE, , void) \
- V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
- V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
- V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
- V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
- V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
- V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
- V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
- USE, , void) \
- V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+#define BINOP_LIST(V) \
+ V(f32_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
+ , , , ROUND_F64_TO_F32, , void) \
+ V(f64_copysign, CopySignF64, DoubleRegister, DoubleRegister, DoubleRegister, \
+ , , , USE, , void) \
+ V(f32_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_min, MinF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_max, MaxF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_sub, SubS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_add, AddS32, Register, Register, Register, , , , USE, , void) \
+ V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_mul, MulS32, Register, Register, Register, , , , USE, , void) \
+ V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i32_andi, AndU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_ori, OrU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_xori, XorU32, Register, Register, int32_t, , , Operand, USE, , void) \
+ V(i32_and, AndU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_or, OrU32, Register, Register, Register, , , , USE, , void) \
+ V(i32_xor, XorU32, Register, Register, Register, , , , USE, , void) \
+ V(i64_and, AndU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_or, OrU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_xor, XorU64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(i64_andi, AndU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_ori, OrU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i64_xori, XorU64, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE, , void) \
+ V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, USE, , void) \
+ V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, USE, , void) \
+ V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
+ V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
+ V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void)
#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
@@ -1082,8 +1216,192 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- bailout(kUnsupportedArchitecture, "emit_type_conversion");
- return true;
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ extsw(dst.gp(), src.gp());
+ return true;
+ case kExprI64SConvertI32:
+ extsw(dst.gp(), src.gp());
+ return true;
+ case kExprI64UConvertI32:
+ ZeroExtWord32(dst.gp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ frsp(dst.fp(), src.fp());
+ return true;
+ case kExprF64ConvertF32:
+ fmr(dst.fp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ ConvertIntToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32UConvertI32: {
+ ConvertUnsignedIntToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64SConvertI32: {
+ ConvertIntToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64UConvertI32: {
+ ConvertUnsignedIntToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64SConvertI64: {
+ ConvertInt64ToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF64UConvertI64: {
+ ConvertUnsignedInt64ToDouble(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32SConvertI64: {
+ ConvertInt64ToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprF32UConvertI64: {
+ ConvertUnsignedInt64ToFloat(src.gp(), dst.fp());
+ return true;
+ }
+ case kExprI32SConvertF64:
+ case kExprI32SConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctiwz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI32UConvertF64:
+ case kExprI32UConvertF32: {
+ ConvertDoubleToUnsignedInt64(src.fp(), r0, kScratchDoubleReg,
+ kRoundToZero);
+ mcrfs(cr7, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ boverflow(trap, cr7);
+ ZeroExtWord32(dst.gp(), r0);
+ CmpU64(dst.gp(), r0);
+ bne(trap);
+ return true;
+ }
+ case kExprI64SConvertF64:
+ case kExprI64SConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctidz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI64UConvertF64:
+ case kExprI64UConvertF32: {
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(trap);
+
+ fctiduz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ mcrfs(cr7, VXCVI);
+ boverflow(trap, cr7);
+ return true;
+ }
+ case kExprI32SConvertSatF64:
+ case kExprI32SConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiwz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32UConvertSatF64:
+ case kExprI32UConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiwuz(kScratchDoubleReg, src.fp());
+ MovDoubleLowToInt(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64SConvertSatF64:
+ case kExprI64SConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctidz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI64UConvertSatF64:
+ case kExprI64UConvertSatF32: {
+ Label done, src_is_nan;
+ LoadDoubleLiteral(kScratchDoubleReg, base::Double(0.0), r0);
+ fcmpu(src.fp(), kScratchDoubleReg);
+ bunordered(&src_is_nan);
+
+ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ fctiduz(kScratchDoubleReg, src.fp());
+ MovDoubleToInt64(dst.gp(), kScratchDoubleReg);
+ b(&done);
+
+ bind(&src_is_nan);
+ mov(dst.gp(), Operand::Zero());
+
+ bind(&done);
+ return true;
+ }
+ case kExprI32ReinterpretF32: {
+ MovFloatToInt(dst.gp(), src.fp(), kScratchDoubleReg);
+ return true;
+ }
+ case kExprI64ReinterpretF64: {
+ MovDoubleToInt64(dst.gp(), src.fp());
+ return true;
+ }
+ case kExprF32ReinterpretI32: {
+ MovIntToFloat(dst.fp(), src.gp(), r0);
+ return true;
+ }
+ case kExprF64ReinterpretI64: {
+ MovInt64ToDouble(dst.fp(), src.gp());
+ return true;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
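The saturating cases above clear FPSCR:VXCVI, convert with a truncating instruction, and map a NaN input to zero explicitly; the trapping cases instead branch to {trap} when the input is unordered or the conversion overflows. For reference, the semantics the saturating i32 path is aiming for, written as plain C++ (a sketch, not the code V8 runs):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t TruncSatF64ToI32(double x) {
      if (std::isnan(x)) return 0;
      if (x <= static_cast<double>(std::numeric_limits<int32_t>::min()))
        return std::numeric_limits<int32_t>::min();
      if (x >= static_cast<double>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();
      return static_cast<int32_t>(std::trunc(x));
    }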
@@ -1133,8 +1451,13 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
Condition cond = liftoff::ToCondition(liftoff_cond);
- CmpS32(lhs, Operand(imm), r0);
+ if (use_signed) {
+ CmpS32(lhs, Operand(imm), r0);
+ } else {
+ CmpU32(lhs, Operand(imm), r0);
+ }
b(cond, label);
}
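UseSignedOp decides whether the immediate comparison is signed or unsigned because the same 32-bit pattern orders differently under the two interpretations; a two-line illustration (two's-complement wrapping assumed, as on all V8 targets):

    #include <cstdint>

    constexpr uint32_t kBits = 0xFFFFFFFFu;
    static_assert(static_cast<int32_t>(kBits) == -1);  // signed view: -1 < 1
    static_assert(kBits > 1u);                         // unsigned view: 4294967295 > 1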
@@ -1191,11 +1514,19 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- fcmpu(lhs, rhs, cr7);
- Label done;
- mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done, cr7);
+ fcmpu(lhs, rhs, cr0);
+ Label nan, done;
+ bunordered(&nan, cr0);
mov(dst, Operand::Zero());
+ b(NegateCondition(liftoff::ToCondition(liftoff_cond)), &done, cr0);
+ mov(dst, Operand(1));
+ b(&done);
+ bind(&nan);
+ if (liftoff_cond == kUnequal) {
+ mov(dst, Operand(1));
+ } else {
+ mov(dst, Operand::Zero());
+ }
bind(&done);
}
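The rewritten emit_f32_set_cond handles the unordered case explicitly: when either operand is NaN, every wasm float comparison yields 0 except kUnequal, which yields 1. The same rule in scalar form, as a sketch:

    #include <cmath>

    bool F32Eq(float a, float b) { return !(std::isnan(a) || std::isnan(b)) && a == b; }
    bool F32Ne(float a, float b) { return std::isnan(a) || std::isnan(b) || a != b; }
    bool F32Lt(float a, float b) { return !(std::isnan(a) || std::isnan(b)) && a < b; }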
@@ -2364,15 +2695,18 @@ void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- bailout(kUnsupportedArchitecture, "StackCheck");
+ LoadU64(limit_address, MemOperand(limit_address), r0);
+ CmpU64(sp, limit_address);
+ ble(ool_code);
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
+ PrepareCallCFunction(0, 0, ip);
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- bailout(kUnsupportedArchitecture, "AssertUnreachable");
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -2506,15 +2840,17 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- bailout(kUnsupportedArchitecture, "CallIndirect");
+ DCHECK(target != no_reg);
+ Call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
- bailout(kUnsupportedArchitecture, "TailCallIndirect");
+ DCHECK(target != no_reg);
+ Jump(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- bailout(kUnsupportedArchitecture, "CallRuntimeStub");
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index 1860a1920f..616f10fa8f 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -1718,17 +1718,21 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
VU.set(kScratchReg, E8, m1);
+ VRegister temp =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp().toV();
if (dst_v == lhs_v) {
- vmv_vv(kSimd128ScratchReg2, lhs_v);
- lhs_v = kSimd128ScratchReg2;
+ vmv_vv(temp, lhs_v);
+ lhs_v = temp;
} else if (dst_v == rhs_v) {
- vmv_vv(kSimd128ScratchReg2, rhs_v);
- rhs_v = kSimd128ScratchReg2;
+ vmv_vv(temp, rhs_v);
+ rhs_v = temp;
}
vrgather_vv(dst_v, lhs_v, kSimd128ScratchReg);
- vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
- vrgather_vv(kSimd128ScratchReg, rhs_v, kSimd128ScratchReg);
- vor_vv(dst_v, dst_v, kSimd128ScratchReg);
+  vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg,
+          -16);  // Indices in the range [16, 31] select the (i - 16)-th
+                 // element of rhs.
+ vrgather_vv(kSimd128ScratchReg2, rhs_v, kSimd128ScratchReg);
+ vor_vv(dst_v, dst_v, kSimd128ScratchReg2);
}
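The shuffle now gathers the lhs lanes first, subtracts 16 from the index vector, gathers the rhs lanes into a scratch register, and ORs the two results together. A scalar reference for what i8x16.shuffle computes (sketch only; wasm guarantees the indices are in [0, 31]):

    #include <array>
    #include <cstdint>

    std::array<uint8_t, 16> Shuffle(const std::array<uint8_t, 16>& lhs,
                                    const std::array<uint8_t, 16>& rhs,
                                    const std::array<uint8_t, 16>& idx) {
      std::array<uint8_t, 16> result{};
      for (int i = 0; i < 16; ++i) {
        // Indices 0..15 select from lhs, 16..31 select from rhs.
        result[i] = idx[i] < 16 ? lhs[idx[i]] : rhs[idx[i] - 16];
      }
      return result;
    }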
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
@@ -1788,12 +1792,16 @@ void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ fmv_x_w(kScratchReg, src.fp());
+ vmv_vx(dst.fp().toV(), kScratchReg);
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ fmv_x_d(kScratchReg, src.fp());
+ vmv_vx(dst.fp().toV(), kScratchReg);
}
#define SIMD_BINOP(name1, name2) \
@@ -1944,22 +1952,34 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_eq");
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_ne");
+ VU.set(kScratchReg, E32, m1);
+ vmfne_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_lt");
+ VU.set(kScratchReg, E32, m1);
+ vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_le");
+ VU.set(kScratchReg, E32, m1);
+ vmfle_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
@@ -1979,7 +1999,10 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f32x4.demote_f64x2_zero");
+ VU.set(kScratchReg, E32, m1);
+ vfncvt_f_f_w(dst.fp().toV(), src.fp().toV());
+ vmv_vi(v0, 12);
+ vmerge_vx(dst.fp().toV(), zero_reg, dst.fp().toV());
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
@@ -2052,7 +2075,11 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister mask) {
- bailout(kSimd, "emit_s128_select");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(kSimd128ScratchReg, src1.fp().toV(), mask.fp().toV());
+ vnot_vv(kSimd128ScratchReg2, mask.fp().toV());
+ vand_vv(kSimd128ScratchReg2, src2.fp().toV(), kSimd128ScratchReg2);
+ vor_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
@@ -2355,9 +2382,12 @@ void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- DCHECK(is_uint5(rhs));
- VU.set(kScratchReg, E32, m1);
- vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
@@ -2494,7 +2524,7 @@ void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- VU.set(kScratchReg, E8, m1);
+ VU.set(kScratchReg, E64, m1);
vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
@@ -2505,12 +2535,14 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_abs");
+ VU.set(kScratchReg, E32, m1);
+ vfabs_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_neg");
+ VU.set(kScratchReg, E32, m1);
+ vfneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -2520,13 +2552,13 @@ void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_ceil");
+ Ceil_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_floor");
+ Floor_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2544,32 +2576,53 @@ bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_mul");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfmul_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_div");
+ VU.set(kScratchReg, E32, m1);
+ vfdiv_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_min");
+ const int32_t kNaN = 0x7FC00000;
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_max");
+ const int32_t kNaN = 0x7FC00000;
+ VU.set(kScratchReg, E32, m1);
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
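f32x4.min and f32x4.max must produce the canonical quiet NaN (0x7FC00000) in any lane with a NaN input, which is why the code builds a both-operands-are-numbers mask and applies vfmin/vfmax only under that mask over a NaN-filled vector. The per-lane rule in scalar form, as a sketch:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    float WasmF32Min(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) {
        const uint32_t kCanonicalNaN = 0x7FC00000u;
        float nan;
        std::memcpy(&nan, &kCanonicalNaN, sizeof nan);
        return nan;
      }
      if (a == 0.0f && b == 0.0f) {
        return std::signbit(a) ? a : b;  // -0 is the minimum of {-0, +0}
      }
      return a < b ? a : b;
    }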
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2584,12 +2637,14 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_abs");
+ VU.set(kScratchReg, E64, m1);
+ vfabs_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_neg");
+ VU.set(kScratchReg, E64, m1);
+ vfneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -2599,13 +2654,13 @@ void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_ceil");
+ Ceil_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_floor");
+ Floor_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2623,12 +2678,14 @@ bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vfadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_sub");
+ VU.set(kScratchReg, E64, m1);
+ vfsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2663,22 +2720,34 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vfcvt_x_f_v(dst.fp().toV(), src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vfcvt_xu_f_v(dst.fp().toV(), src.fp().toV(), Mask);
}
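The two i32x4 conversions above follow the same pattern: the vmfeq mask selects the non-NaN lanes, so NaN lanes keep the zero written by vmv_vx, and the RTZ rounding mode truncates toward zero. Below is a rough scalar model of one signed lane (editorial sketch, not V8 code; saturation of out-of-range inputs is assumed to be handled by the vector convert instruction itself).

  #include <cmath>
  #include <cstdint>
  static inline int32_t I32TruncSatF32Lane(float x) {
    if (std::isnan(x)) return 0;                 // masked-out lane keeps its zero
    if (x <= -2147483648.0f) return INT32_MIN;   // saturate on underflow
    if (x >= 2147483648.0f) return INT32_MAX;    // saturate on overflow
    return static_cast<int32_t>(std::trunc(x));  // RTZ: round toward zero
  }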
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfcvt_f_x_v(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfcvt_f_xu_v(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 3db9ea0975..52e8bb683d 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -10,6 +10,7 @@
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -122,26 +123,72 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
-void LiftoffAssembler::PatchPrepareStackFrame(int offset,
- SafepointTableBuilder*) {
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
constexpr int LayInstrSize = 6;
-#ifdef USE_SIMULATOR
- // When using the simulator, deal with Liftoff which allocates the stack
- // before checking it.
- // TODO(arm): Remove this when the stack check mechanism will be updated.
- if (frame_size > KB / 2) {
- bailout(kOtherReason,
- "Stack limited to 512 bytes to avoid a bug in StackCheck");
- return;
- }
-#endif
Assembler patching_assembler(
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, LayInstrSize + kGap));
- patching_assembler.lay(sp, MemOperand(sp, -frame_size));
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ patching_assembler.lay(sp, MemOperand(sp, -frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ sub(sp, sp, framesize)} with a jump to OOL code that does
+ // this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int jump_offset = pc_offset() - offset;
+ patching_assembler.branchOnCond(al, jump_offset, true, true);
+
+  // If the frame is bigger than the whole stack, we throw the stack overflow
+  // exception unconditionally. This also avoids the need for an integer
+  // overflow check in the comparison below.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = ip;
+ LoadU64(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset),
+ r0);
+ LoadU64(stack_limit, MemOperand(stack_limit), r0);
+ AddU64(stack_limit, Operand(frame_size));
+ CmpU64(sp, stack_limit);
+ bge(&continuation);
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+  // Now allocate the stack space; on s390 this is a single {lay}.
+ lay(sp, MemOperand(sp, -frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+ // is a branch now).
+ jump_offset = offset - pc_offset() + 6;
+ branchOnCond(al, jump_offset, true);
}
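In outline, the patched prologue for a large frame now behaves as follows (editorial sketch in pseudo-C++, not part of the patch; real_stack_limit stands for the value loaded via kRealStackLimitAddressOffset):

  // prologue:   b <ool>                          // patched in place of 'lay sp, -frame_size'
  // ool:        if (frame_size < FLAG_stack_size * 1024 &&
  //                 sp >= real_stack_limit + frame_size) goto allocate;
  //             Call(WasmStackOverflow);         // does not return
  // allocate:   lay sp, -frame_size;
  //             b <back into the function, just after the patched branch>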
void LiftoffAssembler::FinishCode() {}
@@ -2057,8 +2104,13 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
Condition cond = liftoff::ToCondition(liftoff_cond);
- CmpS32(lhs, Operand(imm));
+ if (use_signed) {
+ CmpS32(lhs, Operand(imm));
+ } else {
+ CmpU32(lhs, Operand(imm));
+ }
b(cond, label);
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 50032eac23..890afa2eda 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -357,6 +357,14 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
LoadTaggedPointerField(dst, Operand(instance, offset));
}
+void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance,
+ int offset, ExternalPointerTag tag,
+ Register isolate_root) {
+ LoadExternalPointerField(dst, FieldOperand(instance, offset), tag,
+ isolate_root,
+ IsolateRootLocation::kInScratchRegister);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
movq(liftoff::GetInstanceOperand(), instance);
}
@@ -2183,7 +2191,7 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
}
namespace liftoff {
-template <void (TurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+template <void (SharedTurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
DoubleRegister lhs, DoubleRegister rhs) {
Label cont;
@@ -2344,7 +2352,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
assm->setcc(not_equal, dst.gp());
}
-template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+template <void (SharedTurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src,
base::Optional<CpuFeature> feature = base::nullopt) {
@@ -2719,17 +2727,11 @@ void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
// Different register alias requirements depending on CpuFeatures supported:
- if (CpuFeatures::IsSupported(AVX)) {
- // 1. AVX, no requirements.
+ if (CpuFeatures::IsSupported(AVX) || CpuFeatures::IsSupported(SSE4_2)) {
+    // 1. AVX or SSE4_2: no special requirements (I64x2GtS takes care of aliasing).
I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(SSE4_2)) {
- // 2. SSE4_2, dst == lhs.
- if (dst != lhs) {
- movaps(dst.fp(), lhs.fp());
- }
- I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), kScratchDoubleReg);
} else {
- // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ // 2. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
if (dst == lhs || dst == rhs) {
I64x2GtS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp(),
kScratchDoubleReg);
@@ -2823,9 +2825,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
uint64_t vals[2];
memcpy(vals, imms, sizeof(vals));
- TurboAssembler::Move(dst.fp(), vals[0]);
- movq(kScratchRegister, vals[1]);
- Pinsrq(dst.fp(), kScratchRegister, uint8_t{1});
+ TurboAssembler::Move(dst.fp(), vals[1], vals[0]);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
@@ -3415,19 +3415,7 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs, tmp1));
- Movaps(tmp1.fp(), lhs.fp());
- Movaps(tmp2.fp(), rhs.fp());
- // Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), byte{32});
- Pmuludq(tmp1.fp(), rhs.fp());
- // Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), byte{32});
- Pmuludq(tmp2.fp(), lhs.fp());
- Paddq(tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), byte{32});
- liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
- this, dst, lhs, rhs);
- Paddq(dst.fp(), tmp2.fp());
+ I64x2Mul(dst.fp(), lhs.fp(), rhs.fp(), tmp1.fp(), tmp2.fp());
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
@@ -3485,12 +3473,12 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- Absps(dst.fp(), src.fp());
+ Absps(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- Negps(dst.fp(), src.fp());
+ Negps(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -3552,61 +3540,12 @@ void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminps(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- minps(kScratchDoubleReg, dst.fp());
- minps(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- minps(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minps(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orps(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordps(dst.fp(), kScratchDoubleReg);
- Orps(kScratchDoubleReg, dst.fp());
- Psrld(dst.fp(), byte{10});
- Andnps(dst.fp(), kScratchDoubleReg);
+ F32x4Min(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxps(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxps(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- maxps(kScratchDoubleReg, dst.fp());
- maxps(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- maxps(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxps(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorps(dst.fp(), kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orps(kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subps(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordps(dst.fp(), kScratchDoubleReg);
- Psrld(dst.fp(), byte{10});
- Andnps(dst.fp(), kScratchDoubleReg);
+ F32x4Max(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3625,12 +3564,12 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- Abspd(dst.fp(), src.fp());
+ Abspd(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- Negpd(dst.fp(), src.fp());
+ Negpd(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index a2b026eff3..e216351030 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -32,7 +32,7 @@
#include "src/builtins/builtins.h"
#include "src/compiler/wasm-compiler.h"
#include "src/objects/js-collection-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-instantiate.h"
@@ -1951,7 +1951,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
i::Handle<i::FixedArray> backing_store;
i::Handle<i::WasmTableObject> table_obj = i::WasmTableObject::New(
isolate, i::Handle<i::WasmInstanceObject>(), i_type, minimum, has_maximum,
- maximum, &backing_store);
+ maximum, &backing_store, isolate->factory()->null_value());
if (ref) {
i::Handle<i::JSReceiver> init = impl(ref)->v8_object();
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index 773090c4e5..574fe25cca 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -45,6 +45,8 @@ enum BoundsCheckStrategy : int8_t {
kNoBoundsChecks
};
+enum class DynamicTiering { kEnabled, kDisabled };
+
// The {CompilationEnv} encapsulates the module data that is used during
// compilation. CompilationEnvs are shareable across multiple compilations.
struct CompilationEnv {
@@ -70,10 +72,13 @@ struct CompilationEnv {
// Features enabled for this compilation.
const WasmFeatures enabled_features;
+ const DynamicTiering dynamic_tiering;
+
constexpr CompilationEnv(const WasmModule* module,
BoundsCheckStrategy bounds_checks,
RuntimeExceptionSupport runtime_exception_support,
- const WasmFeatures& enabled_features)
+ const WasmFeatures& enabled_features,
+ DynamicTiering dynamic_tiering)
: module(module),
bounds_checks(bounds_checks),
runtime_exception_support(runtime_exception_support),
@@ -88,7 +93,8 @@ struct CompilationEnv {
uintptr_t{module->maximum_pages})
: kV8MaxWasmMemoryPages) *
kWasmPageSize),
- enabled_features(enabled_features) {}
+ enabled_features(enabled_features),
+ dynamic_tiering(dynamic_tiering) {}
};
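A usage sketch for the widened constructor (illustrative only; the wrapper function and its call site are assumptions, not taken from this patch, and the bounds-check/exception-support arguments are merely placeholders):

  CompilationEnv MakeEnv(const WasmModule* module, bool allow_dynamic_tiering) {
    // Thread the module-wide tiering decision into the shareable environment.
    return CompilationEnv(module, kExplicitBoundsChecks, kRuntimeExceptionSupport,
                          WasmFeatures::All(),
                          allow_dynamic_tiering ? DynamicTiering::kEnabled
                                                : DynamicTiering::kDisabled);
  }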
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
@@ -105,6 +111,7 @@ class WireBytesStorage {
enum class CompilationEvent : uint8_t {
kFinishedBaselineCompilation,
kFinishedExportWrappers,
+ kFinishedCompilationChunk,
kFinishedTopTierCompilation,
kFailedCompilation,
kFinishedRecompilation
@@ -148,6 +155,8 @@ class V8_EXPORT_PRIVATE CompilationState {
void set_compilation_id(int compilation_id);
+ DynamicTiering dynamic_tiering() const;
+
// Override {operator delete} to avoid implicit instantiation of {operator
// delete} with {size_t} argument. The {size_t} argument would be incorrect.
void operator delete(void* ptr) { ::operator delete(ptr); }
@@ -162,7 +171,8 @@ class V8_EXPORT_PRIVATE CompilationState {
// such that it can keep it alive (by regaining a {std::shared_ptr}) in
// certain scopes.
static std::unique_ptr<CompilationState> New(
- const std::shared_ptr<NativeModule>&, std::shared_ptr<Counters>);
+ const std::shared_ptr<NativeModule>&, std::shared_ptr<Counters>,
+ DynamicTiering dynamic_tiering);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 618e8f013c..3d5ec7f933 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -397,6 +397,10 @@ ValueType read_value_type(Decoder* decoder, const byte* pc,
"invalid value type 's128', enable with --experimental-wasm-simd");
return kWasmBottom;
}
+ if (!VALIDATE(CheckHardwareSupportsSimd())) {
+ DecodeError<validate>(decoder, pc, "Wasm SIMD unsupported");
+ return kWasmBottom;
+ }
return kWasmS128;
}
// Although these codes are included in ValueTypeCode, they technically
@@ -945,6 +949,8 @@ struct ControlBase : public PcForErrors<validate> {
F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
F(StructNewWithRtt, const StructIndexImmediate<validate>& imm, \
const Value& rtt, const Value args[], Value* result) \
+ F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
+ const Value& rtt, Value* result) \
F(ArrayInit, const ArrayIndexImmediate<validate>& imm, \
const base::Vector<Value>& elements, const Value& rtt, Value* result) \
F(RttCanon, uint32_t type_index, Value* result) \
@@ -1047,8 +1053,6 @@ struct ControlBase : public PcForErrors<validate> {
F(TableSize, const IndexImmediate<validate>& imm, Value* result) \
F(TableFill, const IndexImmediate<validate>& imm, const Value& start, \
const Value& value, const Value& count) \
- F(StructNewDefault, const StructIndexImmediate<validate>& imm, \
- const Value& rtt, Value* result) \
F(StructGet, const Value& struct_object, \
const FieldImmediate<validate>& field, bool is_signed, Value* result) \
F(StructSet, const Value& struct_object, \
@@ -1330,11 +1334,10 @@ class WasmDecoder : public Decoder {
}
bool CanReturnCall(const FunctionSig* target_sig) {
- if (target_sig == nullptr) return false;
- size_t num_returns = sig_->return_count();
- if (num_returns != target_sig->return_count()) return false;
- for (size_t i = 0; i < num_returns; ++i) {
- if (sig_->GetReturn(i) != target_sig->GetReturn(i)) return false;
+ if (sig_->return_count() != target_sig->return_count()) return false;
+ auto target_sig_it = target_sig->returns().begin();
+ for (ValueType ret_type : sig_->returns()) {
+ if (!IsSubtypeOf(*target_sig_it++, ret_type, this->module_)) return false;
}
return true;
}
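  // (Editorial note, not in the patch.) Switching from exact equality to
  // IsSubtypeOf makes return types covariant for tail calls: for example, a
  // caller declared to return externref may now return_call a target that
  // returns a non-nullable (ref extern), since the target's return type is a
  // subtype of the caller's; the old exact-match check rejected that.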
@@ -1849,8 +1852,10 @@ class WasmDecoder : public Decoder {
opcode =
decoder->read_prefixed_opcode<validate>(pc, &length, "gc_index");
switch (opcode) {
+ case kExprStructNew:
case kExprStructNewWithRtt:
- case kExprStructNewDefault: {
+ case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt: {
StructIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
@@ -1861,8 +1866,10 @@ class WasmDecoder : public Decoder {
FieldImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
+ case kExprArrayNew:
case kExprArrayNewWithRtt:
case kExprArrayNewDefault:
+ case kExprArrayNewDefaultWithRtt:
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
@@ -1871,6 +1878,13 @@ class WasmDecoder : public Decoder {
ArrayIndexImmediate<validate> imm(decoder, pc + length);
return length + imm.length;
}
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
+ ArrayIndexImmediate<validate> array_imm(decoder, pc + length);
+ IndexImmediate<validate> length_imm(
+ decoder, pc + length + array_imm.length, "array length");
+ return length + array_imm.length + length_imm.length;
+ }
case kExprArrayCopy: {
ArrayIndexImmediate<validate> dst_imm(decoder, pc + length);
ArrayIndexImmediate<validate> src_imm(decoder,
@@ -1887,7 +1901,11 @@ class WasmDecoder : public Decoder {
}
case kExprRttCanon:
case kExprRttSub:
- case kExprRttFreshSub: {
+ case kExprRttFreshSub:
+ case kExprRefTestStatic:
+ case kExprRefCastStatic:
+ case kExprBrOnCastStatic:
+ case kExprBrOnCastStaticFail: {
IndexImmediate<validate> imm(decoder, pc + length, "type index");
return length + imm.length;
}
@@ -2041,20 +2059,26 @@ class WasmDecoder : public Decoder {
case kGCPrefix: {
opcode = this->read_prefixed_opcode<validate>(pc);
switch (opcode) {
- case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt:
case kExprStructGet:
case kExprStructGetS:
case kExprStructGetU:
case kExprI31New:
case kExprI31GetS:
case kExprI31GetU:
+ case kExprArrayNewDefault:
case kExprArrayLen:
case kExprRttSub:
case kExprRttFreshSub:
+ case kExprRefTestStatic:
+ case kExprRefCastStatic:
+ case kExprBrOnCastStatic:
+ case kExprBrOnCastStaticFail:
return {1, 1};
case kExprStructSet:
return {2, 0};
- case kExprArrayNewDefault:
+ case kExprArrayNew:
+ case kExprArrayNewDefaultWithRtt:
case kExprArrayGet:
case kExprArrayGetS:
case kExprArrayGetU:
@@ -2068,6 +2092,7 @@ class WasmDecoder : public Decoder {
case kExprArrayCopy:
return {5, 0};
case kExprRttCanon:
+ case kExprStructNewDefault:
return {0, 1};
case kExprArrayNewWithRtt:
return {3, 1};
@@ -2076,6 +2101,18 @@ class WasmDecoder : public Decoder {
CHECK(Validate(pc + 2, imm));
return {imm.struct_type->field_count() + 1, 1};
}
+ case kExprStructNew: {
+ StructIndexImmediate<validate> imm(this, pc + 2);
+ CHECK(Validate(pc + 2, imm));
+ return {imm.struct_type->field_count(), 1};
+ }
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
+ ArrayIndexImmediate<validate> array_imm(this, pc + 2);
+ IndexImmediate<validate> length_imm(this, pc + 2 + array_imm.length,
+ "array length");
+ return {length_imm.index + (opcode == kExprArrayInit ? 1 : 0), 1};
+ }
default:
UNREACHABLE();
}
@@ -2614,9 +2651,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
c->reachability = control_at(1)->innerReachability();
const WasmTagSig* sig = imm.tag->sig;
EnsureStackSpace(static_cast<int>(sig->parameter_count()));
- for (size_t i = 0, e = sig->parameter_count(); i < e; ++i) {
- Push(CreateValue(sig->GetParam(i)));
- }
+ for (ValueType type : sig->parameters()) Push(CreateValue(type));
base::Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
@@ -2689,19 +2724,19 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// the stack as it is.
break;
case kOptRef: {
- Value result = CreateValue(
- ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
- // The result of br_on_null has the same value as the argument (but a
- // non-nullable type).
- if (V8_LIKELY(current_code_reachable_and_ok_)) {
- CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
- CALL_INTERFACE(Forward, ref_object, &result);
- c->br_merge()->reached = true;
- }
- // In unreachable code, we still have to push a value of the correct
- // type onto the stack.
- Drop(ref_object);
- Push(result);
+ Value result = CreateValue(
+ ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
+ // The result of br_on_null has the same value as the argument (but a
+ // non-nullable type).
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ CALL_INTERFACE(Forward, ref_object, &result);
+ c->br_merge()->reached = true;
+ }
+ // In unreachable code, we still have to push a value of the correct
+ // type onto the stack.
+ Drop(ref_object);
+ Push(result);
break;
}
default:
@@ -3299,7 +3334,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (!this->Validate(this->pc_ + 1, imm)) return 0;
if (!VALIDATE(this->CanReturnCall(imm.sig))) {
this->DecodeError("%s: %s", WasmOpcodes::OpcodeName(kExprReturnCall),
- "tail call return types mismatch");
+ "tail call type error");
return 0;
}
ArgVector args = PeekArgs(imm.sig);
@@ -3602,8 +3637,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_NOINLINE int EnsureStackArguments_Slow(int count, uint32_t limit) {
if (!VALIDATE(control_.back().unreachable())) {
- int index = count - stack_size() - 1;
- NotEnoughArgumentsError(index);
+ NotEnoughArgumentsError(count, stack_size() - limit);
}
// Silently create unreachable values out of thin air underneath the
// existing stack values. To do so, we have to move existing stack values
@@ -4000,22 +4034,32 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int DecodeGCOpcode(WasmOpcode opcode, uint32_t opcode_length) {
switch (opcode) {
+ case kExprStructNew:
case kExprStructNewWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Peek(0, imm.struct_type->field_count());
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(imm.struct_type->field_count(), rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprStructNew
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, imm.struct_type->field_count());
+ if (opcode == kExprStructNew) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprStructNewWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(imm.struct_type->field_count(), rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ imm.struct_type->field_count(), rtt,
+ "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
ArgVector args = PeekArgs(imm.struct_type, 1);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4026,8 +4070,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length + imm.length;
}
- case kExprStructNewDefault: {
- NON_CONST_ONLY
+ case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt: {
StructIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (validate) {
@@ -4035,26 +4079,34 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
ValueType ftype = imm.struct_type->field(i);
if (!VALIDATE(ftype.is_defaultable())) {
this->DecodeError(
- "struct.new_default_with_rtt: immediate struct type %d has "
- "field %d of non-defaultable type %s",
- imm.index, i, ftype.name().c_str());
+ "%s: struct type %d has field %d of non-defaultable type %s",
+ WasmOpcodes::OpcodeName(opcode), imm.index, i,
+ ftype.name().c_str());
return 0;
}
}
}
- Value rtt = Peek(0, 0);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(0, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(0, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprStructNewDefault
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 0);
+ if (opcode == kExprStructNewDefault) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprStructNewDefaultWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(0, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 0, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(StructNewDefault, imm, rtt, &value);
@@ -4128,23 +4180,32 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(2);
return opcode_length + field.length;
}
+ case kExprArrayNew:
case kExprArrayNewWithRtt: {
NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
- Value rtt = Peek(0, 2);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(2, rtt, "rtt");
- return 0;
- }
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(2, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprArrayNew
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 2);
+ if (opcode == kExprArrayNew) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprArrayNewWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(2, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 2, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value length = Peek(1, 1, kWasmI32);
Value initial_value =
@@ -4156,30 +4217,39 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length + imm.length;
}
- case kExprArrayNewDefault: {
+ case kExprArrayNewDefault:
+ case kExprArrayNewDefaultWithRtt: {
NON_CONST_ONLY
ArrayIndexImmediate<validate> imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, imm)) return 0;
if (!VALIDATE(imm.array_type->element_type().is_defaultable())) {
this->DecodeError(
- "array.new_default_with_rtt: immediate array type %d has "
- "non-defaultable element type %s",
- imm.index, imm.array_type->element_type().name().c_str());
- return 0;
- }
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
+ "%s: array type %d has non-defaultable element type %s",
+ WasmOpcodes::OpcodeName(opcode), imm.index,
+ imm.array_type->element_type().name().c_str());
return 0;
}
- // TODO(7748): Drop this check if {imm} is dropped from the proposal
- // à la https://github.com/WebAssembly/function-references/pull/31.
- if (!VALIDATE(
- rtt.type.is_bottom() ||
- (rtt.type.ref_index() == imm.index && rtt.type.has_depth()))) {
- PopTypeError(1, rtt,
- "rtt with depth for type " + std::to_string(imm.index));
- return 0;
+ Value rtt = opcode == kExprArrayNewDefault
+ ? CreateValue(ValueType::Rtt(imm.index))
+ : Peek(0, 1);
+ if (opcode == kExprArrayNewDefault) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprArrayNewDefaultWithRtt);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
+ // TODO(7748): Drop this check if {imm} is dropped from the proposal
+ // à la https://github.com/WebAssembly/function-references/pull/31.
+ if (!VALIDATE(rtt.type.is_bottom() ||
+ (rtt.type.ref_index() == imm.index &&
+ rtt.type.has_depth()))) {
+ PopTypeError(
+ 1, rtt, "rtt with depth for type " + std::to_string(imm.index));
+ return 0;
+ }
}
Value length = Peek(1, 0, kWasmI32);
Value value = CreateValue(ValueType::Ref(imm.index, kNonNullable));
@@ -4294,11 +4364,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Drop(5);
return opcode_length + dst_imm.length + src_imm.length;
}
- case kExprArrayInit: {
- if (decoding_mode != kInitExpression) {
- this->DecodeError("array.init is only allowed in init. expressions");
- return 0;
- }
+ case kExprArrayInit:
+ case kExprArrayInitStatic: {
ArrayIndexImmediate<validate> array_imm(this,
this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, array_imm)) return 0;
@@ -4312,12 +4379,18 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
length_imm.index, kV8MaxWasmArrayInitLength);
return 0;
}
+ Value rtt = opcode == kExprArrayInit
+ ? Peek(0, elem_count, ValueType::Rtt(array_imm.index))
+ : CreateValue(ValueType::Rtt(array_imm.index));
+ if (opcode == kExprArrayInitStatic) {
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, array_imm.index, &rtt);
+ Push(rtt);
+ }
ValueType element_type = array_imm.array_type->element_type();
std::vector<ValueType> element_types(elem_count,
element_type.Unpacked());
FunctionSig element_sig(0, elem_count, element_types.data());
ArgVector elements = PeekArgs(&element_sig, 1);
- Value rtt = Peek(0, elem_count, ValueType::Rtt(array_imm.index));
Value result =
CreateValue(ValueType::Ref(array_imm.index, kNonNullable));
CALL_INTERFACE_IF_OK_AND_REACHABLE(ArrayInit, array_imm, elements, rtt,
@@ -4357,7 +4430,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
- Value value = CreateValue(ValueType::Rtt(imm.index, 0));
+ Value value = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &value);
Push(value);
return opcode_length + imm.length;
@@ -4395,16 +4469,29 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
return opcode_length + imm.length;
}
- case kExprRefTest: {
+ case kExprRefTest:
+ case kExprRefTestStatic: {
NON_CONST_ONLY
// "Tests whether {obj}'s runtime type is a runtime subtype of {rtt}."
- Value rtt = Peek(0, 1);
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprRefTestStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprRefTest);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
+ }
Value obj = Peek(1, 0);
Value value = CreateValue(kWasmI32);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
- }
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4429,14 +4516,27 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprRefCast: {
+ case kExprRefCast:
+ case kExprRefCastStatic: {
NON_CONST_ONLY
- Value rtt = Peek(0, 1);
- Value obj = Peek(1, 0);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprRefCastStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprRefCast);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
+ Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
IsSubtypeOf(obj.type,
ValueType::Ref(HeapType::kData, kNullable),
@@ -4475,7 +4575,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(value);
return opcode_length;
}
- case kExprBrOnCast: {
+ case kExprBrOnCast:
+ case kExprBrOnCastStatic: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
@@ -4483,10 +4584,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprBrOnCastStatic) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprBrOnCast);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
@@ -4533,7 +4646,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
}
- case kExprBrOnCastFail: {
+ case kExprBrOnCastFail:
+ case kExprBrOnCastStaticFail: {
NON_CONST_ONLY
BranchDepthImmediate<validate> branch_depth(this,
this->pc_ + opcode_length);
@@ -4541,10 +4655,22 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
control_.size())) {
return 0;
}
- Value rtt = Peek(0, 1);
- if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
- PopTypeError(1, rtt, "rtt");
- return 0;
+ Value rtt = Peek(0, 1); // This is safe for the ...Static instruction.
+ if (opcode == kExprBrOnCastStaticFail) {
+ IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
+ "type index");
+ if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0;
+ opcode_length += imm.length;
+ rtt = CreateValue(ValueType::Rtt(
+ imm.index, GetSubtypingDepth(this->module_, imm.index)));
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(RttCanon, imm.index, &rtt);
+ Push(rtt);
+ } else {
+ DCHECK_EQ(opcode, kExprBrOnCastFail);
+ if (!VALIDATE(rtt.type.is_rtt() || rtt.type.is_bottom())) {
+ PopTypeError(1, rtt, "rtt");
+ return 0;
+ }
}
Value obj = Peek(1, 0);
if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) ||
@@ -4724,7 +4850,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + branch_depth.length;
}
default:
- this->DecodeError("invalid gc opcode");
+ this->DecodeError("invalid gc opcode: %x", opcode);
return 0;
}
}
@@ -4969,9 +5095,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
V8_INLINE ReturnVector CreateReturnValues(const FunctionSig* sig) {
size_t return_count = sig->return_count();
ReturnVector values(return_count);
- for (size_t i = 0; i < return_count; ++i) {
- values[i] = CreateValue(sig->GetReturn(i));
- }
+ std::transform(sig->returns().begin(), sig->returns().end(), values.begin(),
+ [this](ValueType type) { return CreateValue(type); });
return values;
}
V8_INLINE void PushReturns(ReturnVector values) {
@@ -4996,10 +5121,13 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
PopTypeError(index, val, ("type " + expected.name()).c_str());
}
- V8_NOINLINE void NotEnoughArgumentsError(int index) {
+ V8_NOINLINE void NotEnoughArgumentsError(int needed, int actual) {
+ DCHECK_LT(0, needed);
+ DCHECK_LE(0, actual);
+ DCHECK_LT(actual, needed);
this->DecodeError(
- "not enough arguments on the stack for %s, expected %d more",
- SafeOpcodeNameAt(this->pc_), index + 1);
+ "not enough arguments on the stack for %s (need %d, got %d)",
+ SafeOpcodeNameAt(this->pc_), needed, actual);
}
V8_INLINE Value Peek(int depth, int index, ValueType expected) {
@@ -5018,7 +5146,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// Peeking past the current control start in reachable code.
if (!VALIDATE(decoding_mode == kFunctionBody &&
control_.back().unreachable())) {
- NotEnoughArgumentsError(index);
+ NotEnoughArgumentsError(depth + 1, stack_size() - limit);
}
return UnreachableValue(this->pc_);
}
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 19a862d0d4..d5a82073d2 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -63,12 +63,13 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
}
unsigned OpcodeLength(const byte* pc, const byte* end) {
- WasmFeatures no_features = WasmFeatures::None();
+ WasmFeatures unused_detected_features;
Zone* no_zone = nullptr;
WasmModule* no_module = nullptr;
FunctionSig* no_sig = nullptr;
- WasmDecoder<Decoder::kNoValidation> decoder(no_zone, no_module, no_features,
- &no_features, no_sig, pc, end, 0);
+ WasmDecoder<Decoder::kNoValidation> decoder(
+ no_zone, no_module, WasmFeatures::All(), &unused_detected_features,
+ no_sig, pc, end, 0);
return WasmDecoder<Decoder::kNoValidation>::OpcodeLength(&decoder, pc);
}
@@ -253,8 +254,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
i.pc() + 1, module);
os << " @" << i.pc_offset();
CHECK(decoder.Validate(i.pc() + 1, imm));
- for (uint32_t i = 0; i < imm.out_arity(); i++) {
- os << " " << imm.out_type(i).name();
+ for (uint32_t j = 0; j < imm.out_arity(); j++) {
+ os << " " << imm.out_type(j).name();
}
control_depth++;
break;
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index b8eb6b7050..30775b66ac 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -110,11 +110,10 @@ class WasmGraphBuildingInterface {
};
WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
- int func_index,
- EndpointInstrumentationMode instrumentation)
+ int func_index, InlinedStatus inlined_status)
: builder_(builder),
func_index_(func_index),
- instrumentation_(instrumentation) {}
+ inlined_status_(inlined_status) {}
void StartFunction(FullDecoder* decoder) {
// Get the branch hints map for this function (if available)
@@ -158,7 +157,7 @@ class WasmGraphBuildingInterface {
}
LoadContextIntoSsa(ssa_env);
- if (FLAG_trace_wasm && instrumentation_ == kInstrumentEndpoints) {
+ if (FLAG_trace_wasm && inlined_status_ == kRegularFunction) {
builder_->TraceFunctionEntry(decoder->position());
}
}
@@ -171,7 +170,7 @@ class WasmGraphBuildingInterface {
void StartFunctionBody(FullDecoder* decoder, Control* block) {}
void FinishFunction(FullDecoder*) {
- if (instrumentation_ == kInstrumentEndpoints) {
+ if (inlined_status_ == kRegularFunction) {
builder_->PatchInStackCheckIfNeeded();
}
}
@@ -196,7 +195,7 @@ class WasmGraphBuildingInterface {
TFNode* loop_node = builder_->Loop(control());
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
uint32_t nesting_depth = 0;
for (uint32_t depth = 1; depth < decoder->control_depth(); depth++) {
if (decoder->control_at(depth)->is_loop()) {
@@ -306,7 +305,7 @@ class WasmGraphBuildingInterface {
// However, if loop unrolling is enabled, we must create a loop exit and
// wrap the fallthru values on the stack.
if (block->is_loop()) {
- if (FLAG_wasm_loop_unrolling && block->reachable()) {
+ if (emit_loop_exits() && block->reachable()) {
BuildLoopExits(decoder, block);
WrapLocalsAtLoopExit(decoder, block);
uint32_t arity = block->end_merge.arity;
@@ -434,7 +433,7 @@ class WasmGraphBuildingInterface {
void Trap(FullDecoder* decoder, TrapReason reason) {
ValueVector values;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
BuildNestedLoopExits(decoder, decoder->control_depth() - 1, false,
values);
}
@@ -473,7 +472,7 @@ class WasmGraphBuildingInterface {
uint32_t ret_count = static_cast<uint32_t>(decoder->sig_->return_count());
NodeVector values(ret_count);
SsaEnv* internal_env = ssa_env_;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
auto stack_values = CopyStackValues(decoder, ret_count, drop_values);
@@ -486,7 +485,7 @@ class WasmGraphBuildingInterface {
: decoder->stack_value(ret_count + drop_values);
GetNodes(values.begin(), stack_base, ret_count);
}
- if (FLAG_trace_wasm && instrumentation_ == kInstrumentEndpoints) {
+ if (FLAG_trace_wasm && inlined_status_ == kRegularFunction) {
builder_->TraceFunctionExit(base::VectorOf(values), decoder->position());
}
builder_->Return(base::VectorOf(values));
@@ -498,7 +497,7 @@ class WasmGraphBuildingInterface {
DoReturn(decoder, drop_values);
} else {
Control* target = decoder->control_at(depth);
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* internal_env = ssa_env_;
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
@@ -625,50 +624,128 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_);
}
- enum CallMode { kCallDirect, kCallIndirect, kCallRef };
-
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck, nullptr,
- imm.sig, imm.index, args, returns);
+ DoCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args, returns);
}
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kCallDirect, 0, CheckForNull::kWithoutNullCheck,
- Value{nullptr, kWasmBottom}, imm.sig, imm.index, args);
+ DoReturnCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args);
}
void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
- DoCall(decoder, kCallIndirect, imm.table_imm.index,
- CheckForNull::kWithoutNullCheck, index.node, imm.sig,
- imm.sig_imm.index, args, returns);
+ DoCall(
+ decoder,
+ CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
+ imm.sig, args, returns);
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- DoReturnCall(decoder, kCallIndirect, imm.table_imm.index,
- CheckForNull::kWithoutNullCheck, index, imm.sig,
- imm.sig_imm.index, args);
+ DoReturnCall(
+ decoder,
+ CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
+ imm.sig, args);
}
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- DoCall(decoder, kCallRef, 0, NullCheckFor(func_ref.type), func_ref.node,
- sig, sig_index, args, returns);
+ if (!FLAG_wasm_inlining) {
+ DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args, returns);
+ return;
+ }
+
+ // Check for equality against a function at a specific index, and if
+ // successful, just emit a direct call.
+ // TODO(12166): For now, we check against function 0. Decide the index based
+ // on liftoff feedback.
+ const uint32_t expected_function_index = 0;
+
+ TFNode* success_control;
+ TFNode* failure_control;
+ builder_->CompareToExternalFunctionAtIndex(
+ func_ref.node, expected_function_index, &success_control,
+ &failure_control);
+ TFNode* initial_effect = effect();
+
+ builder_->SetControl(success_control);
+ ssa_env_->control = success_control;
+ Value* returns_direct =
+ decoder->zone()->NewArray<Value>(sig->return_count());
+ DoCall(decoder, CallInfo::CallDirect(expected_function_index),
+ decoder->module_->signature(sig_index), args, returns_direct);
+ TFNode* control_direct = control();
+ TFNode* effect_direct = effect();
+
+ builder_->SetEffectControl(initial_effect, failure_control);
+ ssa_env_->effect = initial_effect;
+ ssa_env_->control = failure_control;
+ Value* returns_ref = decoder->zone()->NewArray<Value>(sig->return_count());
+ DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args, returns_ref);
+
+ TFNode* control_ref = control();
+ TFNode* effect_ref = effect();
+
+ TFNode* control_args[] = {control_direct, control_ref};
+ TFNode* control = builder_->Merge(2, control_args);
+
+ TFNode* effect_args[] = {effect_direct, effect_ref, control};
+ TFNode* effect = builder_->EffectPhi(2, effect_args);
+
+ ssa_env_->control = control;
+ ssa_env_->effect = effect;
+ builder_->SetEffectControl(effect, control);
+
+ for (uint32_t i = 0; i < sig->return_count(); i++) {
+ TFNode* phi_args[] = {returns_direct[i].node, returns_ref[i].node,
+ control};
+ returns[i].node = builder_->Phi(sig->GetReturn(i), 2, phi_args);
+ }
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- DoReturnCall(decoder, kCallRef, 0, NullCheckFor(func_ref.type), func_ref,
- sig, sig_index, args);
+ if (!FLAG_wasm_inlining) {
+ DoReturnCall(decoder,
+ CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
+ sig, args);
+ return;
+ }
+
+ // Check for equality against a function at a specific index, and if
+ // successful, just emit a direct call.
+ // TODO(12166): For now, we check against function 0. Decide the index based
+ // on liftoff feedback.
+ const uint32_t expected_function_index = 0;
+
+ TFNode* success_control;
+ TFNode* failure_control;
+ builder_->CompareToExternalFunctionAtIndex(
+ func_ref.node, expected_function_index, &success_control,
+ &failure_control);
+ TFNode* initial_effect = effect();
+
+ builder_->SetControl(success_control);
+ ssa_env_->control = success_control;
+ DoReturnCall(decoder, CallInfo::CallDirect(expected_function_index), sig,
+ args);
+
+ builder_->SetEffectControl(initial_effect, failure_control);
+ ssa_env_->effect = initial_effect;
+ ssa_env_->control = failure_control;
+ DoReturnCall(decoder,
+ CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)), sig,
+ args);
}
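  // (Editorial sketch, not part of the patch.) With --wasm-inlining, CallRef
  // and ReturnCallRef above speculate on the callee being function 0 (the
  // TODO(12166) placeholder) and build a two-way diamond:
  //
  //          func_ref == ref to function[0] ?
  //            yes /                    \ no
  //     success_control            failure_control
  //     CallDirect(0)              CallRef(func_ref)
  //            \                        /
  //        Merge(2), EffectPhi(2), and one Phi per return value
  //
  // ReturnCallRef emits the same guard but tail-calls on both arms, so no
  // merge is needed.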
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
@@ -800,7 +877,7 @@ class WasmGraphBuildingInterface {
}
DCHECK(decoder->control_at(depth)->is_try());
TryInfo* target_try = decoder->control_at(depth)->try_info;
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
ValueVector stack_values;
BuildNestedLoopExits(decoder, depth, true, stack_values,
&block->try_info->exception);
@@ -994,7 +1071,12 @@ class WasmGraphBuildingInterface {
void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements, const Value& rtt,
Value* result) {
- UNREACHABLE();
+ NodeVector element_nodes(elements.size());
+ for (uint32_t i = 0; i < elements.size(); i++) {
+ element_nodes[i] = elements[i].node;
+ }
+ result->node = builder_->ArrayInit(imm.index, imm.array_type, rtt.node,
+ VectorOf(element_nodes));
}
void I31New(FullDecoder* decoder, const Value& input, Value* result) {
@@ -1168,7 +1250,7 @@ class WasmGraphBuildingInterface {
const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
- EndpointInstrumentationMode instrumentation_;
+ InlinedStatus inlined_status_;
TFNode* effect() { return builder_->effect(); }
@@ -1180,6 +1262,14 @@ class WasmGraphBuildingInterface {
->try_info;
}
+ // Loop exits are only used during loop unrolling and are then removed, as
+ // they cannot be handled by later optimization stages. Since unrolling comes
+ // before inlining in the compilation pipeline, we should not emit loop exits
+ // in inlined functions. Also, we should not do so when unrolling is disabled.
+ bool emit_loop_exits() {
+ return FLAG_wasm_loop_unrolling && inlined_status_ == kRegularFunction;
+ }
+
void GetNodes(TFNode** nodes, Value* values, size_t count) {
for (size_t i = 0; i < count; ++i) {
nodes[i] = values[i].node;
@@ -1247,7 +1337,7 @@ class WasmGraphBuildingInterface {
exception_env->effect = if_exception;
SetEnv(exception_env);
TryInfo* try_info = current_try_info(decoder);
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
ValueVector values;
BuildNestedLoopExits(decoder, decoder->control_depth_of_current_catch(),
true, values, &if_exception);
@@ -1259,7 +1349,7 @@ class WasmGraphBuildingInterface {
} else {
DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
try_info->exception = builder_->CreateOrMergeIntoPhi(
- MachineRepresentation::kWord32, try_info->catch_env->control,
+ MachineRepresentation::kTaggedPointer, try_info->catch_env->control,
try_info->exception, if_exception);
}
@@ -1437,36 +1527,102 @@ class WasmGraphBuildingInterface {
return result;
}
- void DoCall(FullDecoder* decoder, CallMode call_mode, uint32_t table_index,
- CheckForNull null_check, TFNode* caller_node,
- const FunctionSig* sig, uint32_t sig_index, const Value args[],
- Value returns[]) {
+ class CallInfo {
+ public:
+ enum CallMode { kCallDirect, kCallIndirect, kCallRef };
+
+ static CallInfo CallDirect(uint32_t callee_index) {
+ return {kCallDirect, callee_index, nullptr, 0,
+ CheckForNull::kWithoutNullCheck};
+ }
+
+ static CallInfo CallIndirect(const Value& index_value, uint32_t table_index,
+ uint32_t sig_index) {
+ return {kCallIndirect, sig_index, &index_value, table_index,
+ CheckForNull::kWithoutNullCheck};
+ }
+
+ static CallInfo CallRef(const Value& funcref_value,
+ CheckForNull null_check) {
+ return {kCallRef, 0, &funcref_value, 0, null_check};
+ }
+
+ CallMode call_mode() { return call_mode_; }
+
+ uint32_t sig_index() {
+ DCHECK_EQ(call_mode_, kCallIndirect);
+ return callee_or_sig_index_;
+ }
+
+ uint32_t callee_index() {
+ DCHECK_EQ(call_mode_, kCallDirect);
+ return callee_or_sig_index_;
+ }
+
+ CheckForNull null_check() {
+ DCHECK_EQ(call_mode_, kCallRef);
+ return null_check_;
+ }
+
+ const Value* index_or_callee_value() {
+ DCHECK_NE(call_mode_, kCallDirect);
+ return index_or_callee_value_;
+ }
+
+ uint32_t table_index() {
+ DCHECK_EQ(call_mode_, kCallIndirect);
+ return table_index_;
+ }
+
+ private:
+ CallInfo(CallMode call_mode, uint32_t callee_or_sig_index,
+ const Value* index_or_callee_value, uint32_t table_index,
+ CheckForNull null_check)
+ : call_mode_(call_mode),
+ callee_or_sig_index_(callee_or_sig_index),
+ index_or_callee_value_(index_or_callee_value),
+ table_index_(table_index),
+ null_check_(null_check) {}
+ CallMode call_mode_;
+ uint32_t callee_or_sig_index_;
+ const Value* index_or_callee_value_;
+ uint32_t table_index_;
+ CheckForNull null_check_;
+ };
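  // (Editorial sketch, not in the patch.) The three factory methods mirror the
  // decoder entry points above, e.g.:
  //   DoCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args, returns);
  //   DoCall(decoder,
  //          CallInfo::CallIndirect(index, imm.table_imm.index, imm.sig_imm.index),
  //          imm.sig, args, returns);
  //   DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
  //          sig, args, returns);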
+
+ void DoCall(FullDecoder* decoder, CallInfo call_info, const FunctionSig* sig,
+ const Value args[], Value returns[]) {
size_t param_count = sig->parameter_count();
size_t return_count = sig->return_count();
NodeVector arg_nodes(param_count + 1);
base::SmallVector<TFNode*, 1> return_nodes(return_count);
- arg_nodes[0] = caller_node;
+ arg_nodes[0] = (call_info.call_mode() == CallInfo::kCallDirect)
+ ? nullptr
+ : call_info.index_or_callee_value()->node;
+
for (size_t i = 0; i < param_count; ++i) {
arg_nodes[i + 1] = args[i].node;
}
- switch (call_mode) {
- case kCallIndirect:
+ switch (call_info.call_mode()) {
+ case CallInfo::kCallIndirect:
CheckForException(
decoder, builder_->CallIndirect(
- table_index, sig_index, base::VectorOf(arg_nodes),
+ call_info.table_index(), call_info.sig_index(),
+ base::VectorOf(arg_nodes),
base::VectorOf(return_nodes), decoder->position()));
break;
- case kCallDirect:
+ case CallInfo::kCallDirect:
CheckForException(
- decoder, builder_->CallDirect(sig_index, base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes),
- decoder->position()));
+ decoder, builder_->CallDirect(
+ call_info.callee_index(), base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes), decoder->position()));
break;
- case kCallRef:
+ case CallInfo::kCallRef:
CheckForException(
- decoder, builder_->CallRef(sig_index, base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes), null_check,
- decoder->position()));
+ decoder,
+ builder_->CallRef(sig, base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes),
+ call_info.null_check(), decoder->position()));
break;
}
for (size_t i = 0; i < return_count; ++i) {
@@ -1477,18 +1633,23 @@ class WasmGraphBuildingInterface {
LoadContextIntoSsa(ssa_env_);
}
- void DoReturnCall(FullDecoder* decoder, CallMode call_mode,
- uint32_t table_index, CheckForNull null_check,
- Value index_or_caller_value, const FunctionSig* sig,
- uint32_t sig_index, const Value args[]) {
+ void DoReturnCall(FullDecoder* decoder, CallInfo call_info,
+ const FunctionSig* sig, const Value args[]) {
size_t arg_count = sig->parameter_count();
ValueVector arg_values(arg_count + 1);
- arg_values[0] = index_or_caller_value;
- for (uint32_t i = 0; i < arg_count; i++) {
- arg_values[i + 1] = args[i];
+ if (call_info.call_mode() == CallInfo::kCallDirect) {
+ arg_values[0].node = nullptr;
+ } else {
+ arg_values[0] = *call_info.index_or_callee_value();
+      // The {node} is not covered by the copy assignment above, so set it
+      // explicitly.
+ arg_values[0].node = call_info.index_or_callee_value()->node;
}
- if (FLAG_wasm_loop_unrolling) {
+ if (arg_count > 0) {
+ std::memcpy(arg_values.data() + 1, args, arg_count * sizeof(Value));
+ }
+
+ if (emit_loop_exits()) {
BuildNestedLoopExits(decoder, decoder->control_depth(), false,
arg_values);
}
@@ -1496,22 +1657,24 @@ class WasmGraphBuildingInterface {
NodeVector arg_nodes(arg_count + 1);
GetNodes(arg_nodes.data(), base::VectorOf(arg_values));
- switch (call_mode) {
- case kCallIndirect:
- CheckForException(
- decoder, builder_->ReturnCallIndirect(table_index, sig_index,
- base::VectorOf(arg_nodes),
- decoder->position()));
+ switch (call_info.call_mode()) {
+ case CallInfo::kCallIndirect:
+ CheckForException(decoder,
+ builder_->ReturnCallIndirect(
+ call_info.table_index(), call_info.sig_index(),
+ base::VectorOf(arg_nodes), decoder->position()));
break;
- case kCallDirect:
- CheckForException(
- decoder, builder_->ReturnCall(sig_index, base::VectorOf(arg_nodes),
- decoder->position()));
+ case CallInfo::kCallDirect:
+ CheckForException(decoder,
+ builder_->ReturnCall(call_info.callee_index(),
+ base::VectorOf(arg_nodes),
+ decoder->position()));
break;
- case kCallRef:
- CheckForException(decoder, builder_->ReturnCallRef(
- sig_index, base::VectorOf(arg_nodes),
- null_check, decoder->position()));
+ case CallInfo::kCallRef:
+ CheckForException(
+ decoder, builder_->ReturnCallRef(sig, base::VectorOf(arg_nodes),
+ call_info.null_check(),
+ decoder->position()));
break;
}
}
@@ -1546,7 +1709,7 @@ class WasmGraphBuildingInterface {
void BuildNestedLoopExits(FullDecoder* decoder, uint32_t depth_limit,
bool wrap_exit_values, ValueVector& stack_values,
TFNode** exception_value = nullptr) {
- DCHECK(FLAG_wasm_loop_unrolling);
+ DCHECK(emit_loop_exits());
Control* control = nullptr;
// We are only interested in exits from the innermost loop.
for (uint32_t i = 0; i < depth_limit; i++) {
@@ -1575,7 +1738,7 @@ class WasmGraphBuildingInterface {
}
void TerminateThrow(FullDecoder* decoder) {
- if (FLAG_wasm_loop_unrolling) {
+ if (emit_loop_exits()) {
SsaEnv* internal_env = ssa_env_;
SsaEnv* exit_env = Split(decoder->zone(), ssa_env_);
SetEnv(exit_env);
@@ -1604,12 +1767,11 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins,
- int func_index,
- EndpointInstrumentationMode instrumentation) {
+ int func_index, InlinedStatus inlined_status) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
&zone, module, enabled, detected, body, builder, func_index,
- instrumentation);
+ inlined_status);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
@@ -1617,7 +1779,7 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
if (node_origins) {
builder->RemoveBytecodePositionDecorator();
}
- if (FLAG_wasm_loop_unrolling) {
+ if (FLAG_wasm_loop_unrolling && inlined_status == kRegularFunction) {
*loop_infos = decoder.interface().loop_infos();
}
return decoder.toResult(nullptr);
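A reading aid, not part of the patch: with the CallInfo value type introduced above, the decoder hooks for the three call forms all reduce to constructing one CallInfo and forwarding to the shared DoCall/DoReturnCall helpers. The exact hook signatures, the CallDirect factory and NullCheckFor are assumed from context; roughly:

  // Hypothetical call sites; {imm}, {args} and {returns} come from the decoder.
  void CallDirect(FullDecoder* decoder,
                  const CallFunctionImmediate<validate>& imm,
                  const Value args[], Value returns[]) {
    DoCall(decoder, CallInfo::CallDirect(imm.index), imm.sig, args, returns);
  }
  void CallRef(FullDecoder* decoder, const Value& func_ref,
               const FunctionSig* sig, uint32_t sig_index, const Value args[],
               Value returns[]) {
    DoCall(decoder, CallInfo::CallRef(func_ref, NullCheckFor(func_ref.type)),
           sig, args, returns);
  }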
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index c264bc8330..49d9dd353c 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -27,10 +27,7 @@ struct FunctionBody;
class WasmFeatures;
struct WasmModule;
-enum EndpointInstrumentationMode {
- kDoNotInstrumentEndpoints,
- kInstrumentEndpoints
-};
+enum InlinedStatus { kInlinedFunction, kRegularFunction };
V8_EXPORT_PRIVATE DecodeResult
BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
@@ -38,7 +35,7 @@ BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins, int func_index,
- EndpointInstrumentationMode instrumentation);
+ InlinedStatus inlined_status);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/init-expr-interface.cc b/deps/v8/src/wasm/init-expr-interface.cc
index 52c45bd18b..818145d095 100644
--- a/deps/v8/src/wasm/init-expr-interface.cc
+++ b/deps/v8/src/wasm/init-expr-interface.cc
@@ -89,6 +89,48 @@ void InitExprInterface::StructNewWithRtt(
ValueType::Ref(HeapType(imm.index), kNonNullable));
}
+namespace {
+WasmValue DefaultValueForType(ValueType type, Isolate* isolate) {
+ switch (type.kind()) {
+ case kI32:
+ case kI8:
+ case kI16:
+ return WasmValue(0);
+ case kI64:
+ return WasmValue(int64_t{0});
+ case kF32:
+ return WasmValue(0.0f);
+ case kF64:
+ return WasmValue(0.0);
+ case kS128:
+ return WasmValue(Simd128());
+ case kOptRef:
+ return WasmValue(isolate->factory()->null_value(), type);
+ case kVoid:
+ case kRtt:
+ case kRttWithDepth:
+ case kRef:
+ case kBottom:
+ UNREACHABLE();
+ }
+}
+} // namespace
+
+void InitExprInterface::StructNewDefault(
+ FullDecoder* decoder, const StructIndexImmediate<validate>& imm,
+ const Value& rtt, Value* result) {
+ if (isolate_ == nullptr) return;
+ std::vector<WasmValue> field_values(imm.struct_type->field_count());
+ for (uint32_t i = 0; i < field_values.size(); i++) {
+ field_values[i] = DefaultValueForType(imm.struct_type->field(i), isolate_);
+ }
+ result->runtime_value =
+ WasmValue(isolate_->factory()->NewWasmStruct(
+ imm.struct_type, field_values.data(),
+ Handle<Map>::cast(rtt.runtime_value.to_ref())),
+ ValueType::Ref(HeapType(imm.index), kNonNullable));
+}
+
void InitExprInterface::ArrayInit(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const base::Vector<Value>& elements,
diff --git a/deps/v8/src/wasm/memory-protection-key.cc b/deps/v8/src/wasm/memory-protection-key.cc
index 441826e707..c3e844ff1c 100644
--- a/deps/v8/src/wasm/memory-protection-key.cc
+++ b/deps/v8/src/wasm/memory-protection-key.cc
@@ -166,7 +166,7 @@ bool SetPermissionsAndMemoryProtectionKey(
DISABLE_CFI_ICALL
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions) {
- CHECK_NE(kNoMemoryProtectionKey, key);
+ DCHECK_NE(kNoMemoryProtectionKey, key);
#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
typedef int (*pkey_set_t)(int, unsigned int);
@@ -177,8 +177,27 @@ void SetPermissionsForMemoryProtectionKey(
int ret = pkey_set(key, permissions);
CHECK_EQ(0 /* success */, ret);
#else
- // On platforms without PKU support, we should have failed the CHECK above
- // because the key must be {kNoMemoryProtectionKey}.
+ // On platforms without PKU support, this method cannot be called because
+ // no protection key can have been allocated.
+ UNREACHABLE();
+#endif
+}
+
+DISABLE_CFI_ICALL
+bool MemoryProtectionKeyWritable(int key) {
+ DCHECK_NE(kNoMemoryProtectionKey, key);
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_get_t)(int);
+ static auto* pkey_get = bit_cast<pkey_get_t>(dlsym(RTLD_DEFAULT, "pkey_get"));
+ // If a valid key was allocated, {pkey_get()} must also be available.
+ DCHECK_NOT_NULL(pkey_get);
+
+ int permissions = pkey_get(key);
+ return permissions == kNoRestrictions;
+#else
+ // On platforms without PKU support, this method cannot be called because
+ // no protection key can have been allocated.
UNREACHABLE();
#endif
}
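A stand-alone sketch of the pkey_get() contract that MemoryProtectionKeyWritable() above relies on (Linux x64 only; kNoRestrictions is assumed to be 0, i.e. neither PKEY_DISABLE_ACCESS nor PKEY_DISABLE_WRITE is set):

  #include <dlfcn.h>

  // Returns true if {key} is currently readable and writable for this thread.
  bool IsKeyWritableSketch(int key) {
    using pkey_get_t = int (*)(int);
    auto* pkey_get =
        reinterpret_cast<pkey_get_t>(dlsym(RTLD_DEFAULT, "pkey_get"));
    if (pkey_get == nullptr) return false;  // libc without PKU support
    return pkey_get(key) == 0;              // 0 == no restrictions
  }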
diff --git a/deps/v8/src/wasm/memory-protection-key.h b/deps/v8/src/wasm/memory-protection-key.h
index c435357567..7a9ba72194 100644
--- a/deps/v8/src/wasm/memory-protection-key.h
+++ b/deps/v8/src/wasm/memory-protection-key.h
@@ -82,6 +82,10 @@ bool SetPermissionsAndMemoryProtectionKey(
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions);
+// Returns {true} if the protection key {key} is write-enabled for the current
+// thread.
+bool MemoryProtectionKeyWritable(int key);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 2d66102c1f..2611c2d9e9 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -16,6 +16,7 @@
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
@@ -528,7 +529,8 @@ bool CompilationUnitQueues::Queue::ShouldPublish(
class CompilationStateImpl {
public:
CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters);
+ std::shared_ptr<Counters> async_counters,
+ DynamicTiering dynamic_tiering);
~CompilationStateImpl() {
if (compile_job_->IsValid()) compile_job_->CancelAndDetach();
}
@@ -637,6 +639,8 @@ class CompilationStateImpl {
return outstanding_recompilation_functions_ == 0;
}
+ DynamicTiering dynamic_tiering() const { return dynamic_tiering_; }
+
Counters* counters() const { return async_counters_.get(); }
void SetWireBytesStorage(
@@ -662,7 +666,7 @@ class CompilationStateImpl {
private:
uint8_t SetupCompilationProgressForFunction(
- bool lazy_module, const WasmModule* module,
+ bool lazy_module, NativeModule* module,
const WasmFeatures& enabled_features, int func_index);
// Returns the potentially-updated {function_progress}.
@@ -701,6 +705,10 @@ class CompilationStateImpl {
std::vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
js_to_wasm_wrapper_units_;
+  // Cache the dynamic tiering configuration so that it stays consistent for
+  // the whole compilation.
+ const DynamicTiering dynamic_tiering_;
+
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
@@ -745,6 +753,9 @@ class CompilationStateImpl {
int outstanding_baseline_units_ = 0;
int outstanding_export_wrappers_ = 0;
int outstanding_top_tier_functions_ = 0;
+  // The amount of top-tier code generated since the last
+  // {kFinishedCompilationChunk} event.
+ size_t bytes_since_last_chunk_ = 0;
std::vector<uint8_t> compilation_progress_;
int outstanding_recompilation_functions_ = 0;
@@ -860,13 +871,17 @@ void CompilationState::set_compilation_id(int compilation_id) {
Impl(this)->set_compilation_id(compilation_id);
}
+DynamicTiering CompilationState::dynamic_tiering() const {
+ return Impl(this)->dynamic_tiering();
+}
+
// static
std::unique_ptr<CompilationState> CompilationState::New(
const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters) {
- return std::unique_ptr<CompilationState>(
- reinterpret_cast<CompilationState*>(new CompilationStateImpl(
- std::move(native_module), std::move(async_counters))));
+ std::shared_ptr<Counters> async_counters, DynamicTiering dynamic_tiering) {
+ return std::unique_ptr<CompilationState>(reinterpret_cast<CompilationState*>(
+ new CompilationStateImpl(std::move(native_module),
+ std::move(async_counters), dynamic_tiering)));
}
// End of PIMPL implementation of {CompilationState}.
@@ -926,13 +941,18 @@ struct ExecutionTierPair {
};
ExecutionTierPair GetRequestedExecutionTiers(
- const WasmModule* module, const WasmFeatures& enabled_features,
+ NativeModule* native_module, const WasmFeatures& enabled_features,
uint32_t func_index) {
+ const WasmModule* module = native_module->module();
ExecutionTierPair result;
result.baseline_tier = WasmCompilationUnit::GetBaselineExecutionTier(module);
- if (module->origin != kWasmOrigin || !FLAG_wasm_tier_up) {
+ bool dynamic_tiering =
+ Impl(native_module->compilation_state())->dynamic_tiering() ==
+ DynamicTiering::kEnabled;
+ bool tier_up_enabled = !dynamic_tiering && FLAG_wasm_tier_up;
+ if (module->origin != kWasmOrigin || !tier_up_enabled) {
result.top_tier = result.baseline_tier;
return result;
}
@@ -975,8 +995,7 @@ class CompilationUnitBuilder {
return;
}
ExecutionTierPair tiers = GetRequestedExecutionTiers(
- native_module_->module(), native_module_->enabled_features(),
- func_index);
+ native_module_, native_module_->enabled_features(), func_index);
// Compile everything for non-debugging initially. If needed, we will tier
// down when the module is fully compiled. Synchronization would be pretty
// difficult otherwise.
@@ -1141,7 +1160,7 @@ bool CompileLazy(Isolate* isolate, Handle<WasmModuleObject> module_object,
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
ExecutionTierPair tiers =
- GetRequestedExecutionTiers(module, enabled_features, func_index);
+ GetRequestedExecutionTiers(native_module, enabled_features, func_index);
DCHECK_LE(native_module->num_imported_functions(), func_index);
DCHECK_LT(func_index, native_module->num_functions());
@@ -1530,13 +1549,13 @@ class CompilationTimeCallback {
native_module_(std::move(native_module)),
compile_mode_(compile_mode) {}
- void operator()(CompilationEvent event) {
+ void operator()(CompilationEvent compilation_event) {
DCHECK(base::TimeTicks::IsHighResolution());
std::shared_ptr<NativeModule> native_module = native_module_.lock();
if (!native_module) return;
auto now = base::TimeTicks::Now();
auto duration = now - start_time_;
- if (event == CompilationEvent::kFinishedBaselineCompilation) {
+ if (compilation_event == CompilationEvent::kFinishedBaselineCompilation) {
// Reset {start_time_} to measure tier-up time.
start_time_ = now;
if (compile_mode_ != kSynchronous) {
@@ -1561,7 +1580,7 @@ class CompilationTimeCallback {
native_module->baseline_compilation_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
- if (event == CompilationEvent::kFinishedTopTierCompilation) {
+ if (compilation_event == CompilationEvent::kFinishedTopTierCompilation) {
TimedHistogram* histogram = async_counters_->wasm_tier_up_module_time();
histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
@@ -1573,7 +1592,7 @@ class CompilationTimeCallback {
native_module->tier_up_cpu_duration())};
metrics_recorder_->DelayMainThreadEvent(event, context_id_);
}
- if (event == CompilationEvent::kFailedCompilation) {
+ if (compilation_event == CompilationEvent::kFailedCompilation) {
v8::metrics::WasmModuleCompiled event{
(compile_mode_ != kSynchronous), // async
(compile_mode_ == kStreaming), // streamed
@@ -2094,8 +2113,12 @@ class AsyncCompileJob::CompilationStateCallback {
: nullptr);
}
break;
+ case CompilationEvent::kFinishedCompilationChunk:
+ DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_ ||
+ CompilationEvent::kFinishedCompilationChunk == last_event_);
+ break;
case CompilationEvent::kFinishedTopTierCompilation:
- DCHECK_EQ(CompilationEvent::kFinishedBaselineCompilation, last_event_);
+ DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_);
// At this point, the job will already be gone, thus do not access it
// here.
break;
@@ -2821,11 +2844,12 @@ bool AsyncStreamingProcessor::Deserialize(
CompilationStateImpl::CompilationStateImpl(
const std::shared_ptr<NativeModule>& native_module,
- std::shared_ptr<Counters> async_counters)
+ std::shared_ptr<Counters> async_counters, DynamicTiering dynamic_tiering)
: native_module_(native_module.get()),
native_module_weak_(std::move(native_module)),
async_counters_(std::move(async_counters)),
- compilation_unit_queues_(native_module->num_functions()) {}
+ compilation_unit_queues_(native_module->num_functions()),
+ dynamic_tiering_(dynamic_tiering) {}
void CompilationStateImpl::InitCompileJob() {
DCHECK_NULL(compile_job_);
@@ -2858,12 +2882,12 @@ bool CompilationStateImpl::cancelled() const {
}
uint8_t CompilationStateImpl::SetupCompilationProgressForFunction(
- bool lazy_module, const WasmModule* module,
+ bool lazy_module, NativeModule* native_module,
const WasmFeatures& enabled_features, int func_index) {
ExecutionTierPair requested_tiers =
- GetRequestedExecutionTiers(module, enabled_features, func_index);
- CompileStrategy strategy =
- GetCompileStrategy(module, enabled_features, func_index, lazy_module);
+ GetRequestedExecutionTiers(native_module, enabled_features, func_index);
+ CompileStrategy strategy = GetCompileStrategy(
+ native_module->module(), enabled_features, func_index, lazy_module);
bool required_for_baseline = strategy == CompileStrategy::kEager;
bool required_for_top_tier = strategy != CompileStrategy::kLazy;
@@ -2916,7 +2940,7 @@ void CompilationStateImpl::InitializeCompilationProgress(
continue;
}
uint8_t function_progress = SetupCompilationProgressForFunction(
- lazy_module, module, enabled_features, func_index);
+ lazy_module, native_module_, enabled_features, func_index);
compilation_progress_.push_back(function_progress);
}
DCHECK_IMPLIES(lazy_module, outstanding_baseline_units_ == 0);
@@ -3050,7 +3074,7 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
native_module_->UseLazyStub(func_index);
}
compilation_progress_[declared_function_index(module, func_index)] =
- SetupCompilationProgressForFunction(lazy_module, module,
+ SetupCompilationProgressForFunction(lazy_module, native_module_,
enabled_features, func_index);
}
}
@@ -3190,6 +3214,10 @@ void CompilationStateImpl::CommitTopTierCompilationUnit(
void CompilationStateImpl::AddTopTierPriorityCompilationUnit(
WasmCompilationUnit unit, size_t priority) {
compilation_unit_queues_.AddTopTierPriorityUnit(unit, priority);
+ {
+ base::MutexGuard guard(&callbacks_mutex_);
+ outstanding_top_tier_functions_++;
+ }
compile_job_->NotifyConcurrencyIncrease();
}
@@ -3302,6 +3330,9 @@ void CompilationStateImpl::OnFinishedUnits(
DCHECK_GT(outstanding_baseline_units_, 0);
outstanding_baseline_units_--;
}
+ if (code->tier() == ExecutionTier::kTurbofan) {
+ bytes_since_last_chunk_ += code->instructions().size();
+ }
if (reached_tier < required_top_tier &&
required_top_tier <= code->tier()) {
DCHECK_GT(outstanding_top_tier_functions_, 0);
@@ -3355,12 +3386,19 @@ void CompilationStateImpl::TriggerCallbacks(
triggered_events.Add(CompilationEvent::kFinishedExportWrappers);
if (outstanding_baseline_units_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedBaselineCompilation);
- if (outstanding_top_tier_functions_ == 0) {
+ if (dynamic_tiering_ == DynamicTiering::kDisabled &&
+ outstanding_top_tier_functions_ == 0) {
triggered_events.Add(CompilationEvent::kFinishedTopTierCompilation);
}
}
}
+ if (dynamic_tiering_ == DynamicTiering::kEnabled &&
+ static_cast<size_t>(FLAG_wasm_caching_threshold) <
+ bytes_since_last_chunk_) {
+ triggered_events.Add(CompilationEvent::kFinishedCompilationChunk);
+ bytes_since_last_chunk_ = 0;
+ }
if (compile_failed_.load(std::memory_order_relaxed)) {
// *Only* trigger the "failed" event.
triggered_events =
@@ -3371,9 +3409,11 @@ void CompilationStateImpl::TriggerCallbacks(
// Don't trigger past events again.
triggered_events -= finished_events_;
- // Recompilation can happen multiple times, thus do not store this.
- finished_events_ |=
- triggered_events - CompilationEvent::kFinishedRecompilation;
+ // Recompilation can happen multiple times, thus do not store this. There can
+ // also be multiple compilation chunks.
+ finished_events_ |= triggered_events -
+ CompilationEvent::kFinishedRecompilation -
+ CompilationEvent::kFinishedCompilationChunk;
for (auto event :
{std::make_pair(CompilationEvent::kFailedCompilation,
@@ -3384,6 +3424,8 @@ void CompilationStateImpl::TriggerCallbacks(
"wasm.BaselineFinished"),
std::make_pair(CompilationEvent::kFinishedTopTierCompilation,
"wasm.TopTierFinished"),
+ std::make_pair(CompilationEvent::kFinishedCompilationChunk,
+ "wasm.CompilationChunkFinished"),
std::make_pair(CompilationEvent::kFinishedRecompilation,
"wasm.RecompilationFinished")}) {
if (!triggered_events.contains(event.first)) continue;
@@ -3394,7 +3436,11 @@ void CompilationStateImpl::TriggerCallbacks(
}
}
- if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
+ // With dynamic tiering, we don't know if we can ever delete the callback.
+ // TODO(https://crbug.com/v8/12289): Release some callbacks also when dynamic
+ // tiering is enabled.
+ if (dynamic_tiering_ == DynamicTiering::kDisabled &&
+ outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
outstanding_top_tier_functions_ == 0 &&
outstanding_recompilation_functions_ == 0) {
// Clear the callbacks because no more events will be delivered.
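A simplified stand-alone sketch of the dynamic-tiering trigger added to TriggerCallbacks() above: sizes of newly generated top-tier code accumulate in bytes_since_last_chunk_, and once the caching threshold is exceeded a kFinishedCompilationChunk event is emitted and the counter restarts. Names here are illustrative only:

  #include <cstddef>

  struct ChunkTrackerSketch {
    size_t threshold;                   // plays the role of FLAG_wasm_caching_threshold
    size_t bytes_since_last_chunk = 0;  // reset after each emitted chunk event

    // Returns true when the caller should trigger kFinishedCompilationChunk.
    bool OnTopTierCode(size_t code_size) {
      bytes_since_last_chunk += code_size;
      if (bytes_since_last_chunk <= threshold) return false;
      bytes_since_last_chunk = 0;
      return true;
    }
  };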
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index d2c78f0da5..8129882ce8 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -550,35 +550,40 @@ class ModuleDecoderImpl : public Decoder {
}
void DecodeTypeSection() {
- uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
- module_->types.reserve(signatures_count);
- for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
+ uint32_t types_count = consume_count("types count", kV8MaxWasmTypes);
+ module_->types.reserve(types_count);
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
uint8_t kind = consume_u8("type kind");
switch (kind) {
- case kWasmFunctionTypeCode: {
+ case kWasmFunctionTypeCode:
+ case kWasmFunctionSubtypeCode: {
const FunctionSig* s = consume_sig(module_->signature_zone.get());
- module_->add_signature(s);
- break;
- }
- case kWasmFunctionExtendingTypeCode: {
- if (!enabled_features_.has_gc()) {
- errorf(pc(),
- "invalid function type definition, enable with "
- "--experimental-wasm-gc");
- break;
- }
- const FunctionSig* s = consume_sig(module_->signature_zone.get());
- module_->add_signature(s);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_signature(super_index)) {
- errorf(pc(), "invalid function supertype index: %d", super_index);
- break;
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmFunctionSubtypeCode) {
+ if (!enabled_features_.has_gc()) {
+ errorf(pc(),
+ "invalid function type definition, enable with "
+ "--experimental-wasm-gc");
+ break;
+ }
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kFunc) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
}
+ module_->add_signature(s, super_index);
break;
}
- case kWasmStructTypeCode: {
+ case kWasmStructTypeCode:
+ case kWasmStructSubtypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
"invalid struct type definition, enable with "
@@ -586,39 +591,26 @@ class ModuleDecoderImpl : public Decoder {
break;
}
const StructType* s = consume_struct(module_->signature_zone.get());
- module_->add_struct_type(s);
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmStructSubtypeCode) {
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kData) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
+ }
+ module_->add_struct_type(s, super_index);
// TODO(7748): Should we canonicalize struct types, like
// {signature_map} does for function signatures?
break;
}
- case kWasmStructExtendingTypeCode: {
- if (!enabled_features_.has_gc()) {
- errorf(pc(),
- "invalid struct type definition, enable with "
- "--experimental-wasm-gc");
- break;
- }
- const StructType* s = consume_struct(module_->signature_zone.get());
- module_->add_struct_type(s);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_struct(super_index)) {
- errorf(pc(), "invalid struct supertype: %d", super_index);
- break;
- }
- break;
- }
- case kWasmArrayTypeCode: {
- if (!enabled_features_.has_gc()) {
- errorf(pc(),
- "invalid array type definition, enable with "
- "--experimental-wasm-gc");
- break;
- }
- const ArrayType* type = consume_array(module_->signature_zone.get());
- module_->add_array_type(type);
- break;
- }
- case kWasmArrayExtendingTypeCode: {
+ case kWasmArrayTypeCode:
+ case kWasmArraySubtypeCode: {
if (!enabled_features_.has_gc()) {
errorf(pc(),
"invalid array type definition, enable with "
@@ -626,12 +618,20 @@ class ModuleDecoderImpl : public Decoder {
break;
}
const ArrayType* type = consume_array(module_->signature_zone.get());
- module_->add_array_type(type);
- uint32_t super_index = consume_u32v("supertype");
- if (!module_->has_array(super_index)) {
- errorf(pc(), "invalid array supertype: %d", super_index);
- break;
+ uint32_t super_index = kNoSuperType;
+ if (kind == kWasmArraySubtypeCode) {
+ HeapType super_type = consume_super_type();
+ if (super_type == HeapType::kData) {
+ super_index = kGenericSuperType;
+ } else if (super_type.is_index()) {
+ super_index = super_type.representation();
+ } else {
+ errorf(pc(), "type %d: invalid supertype %d", i,
+ super_type.code());
+ break;
+ }
}
+ module_->add_array_type(type, super_index);
break;
}
default:
@@ -639,6 +639,46 @@ class ModuleDecoderImpl : public Decoder {
break;
}
}
+ // Check validity of explicitly defined supertypes.
+ const WasmModule* module = module_.get();
+ for (uint32_t i = 0; ok() && i < types_count; ++i) {
+ uint32_t explicit_super = module_->supertype(i);
+ if (explicit_super == kNoSuperType) continue;
+ if (explicit_super == kGenericSuperType) continue;
+ DCHECK_LT(explicit_super, types_count); // {consume_super_type} checks.
+ // Only types that have an explicit supertype themselves can be explicit
+ // supertypes of other types.
+ if (!module->has_supertype(explicit_super)) {
+ errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ continue;
+ }
+ int depth = GetSubtypingDepth(module, i);
+ if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) {
+ errorf("type %d: subtyping depth is greater than allowed", i);
+ continue;
+ }
+ if (depth == -1) {
+ errorf("type %d: cyclic inheritance", i);
+ continue;
+ }
+ switch (module_->type_kinds[i]) {
+ case kWasmStructTypeCode:
+ if (!module->has_struct(explicit_super)) break;
+ if (!StructIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ case kWasmArrayTypeCode:
+ if (!module->has_array(explicit_super)) break;
+ if (!ArrayIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ case kWasmFunctionTypeCode:
+ if (!module->has_signature(explicit_super)) break;
+ if (!FunctionIsSubtypeOf(i, explicit_super, module, module)) break;
+ continue;
+ default:
+ UNREACHABLE();
+ }
+ errorf("type %d has invalid explicit supertype %d", i, explicit_super);
+ }
module_->signature_map.Freeze();
}
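A stand-alone sketch of the contract the supertype validation loop above relies on; the real helper is GetSubtypingDepth, so this describes its assumed behaviour, not its implementation: follow explicit supertype links, return the chain length, and report a cycle as -1. Type indices are assumed to be in range ({consume_super_type} checks them):

  #include <cstdint>
  #include <vector>

  int SubtypingDepthSketch(const std::vector<uint32_t>& explicit_super,
                           uint32_t type, uint32_t no_super,
                           uint32_t generic_super) {
    int depth = 0;
    uint32_t current = type;
    // Any chain longer than the number of types must contain a cycle.
    for (size_t steps = 0; steps <= explicit_super.size(); ++steps) {
      uint32_t super = explicit_super[current];
      if (super == no_super || super == generic_super) return depth;
      current = super;
      ++depth;
    }
    return -1;  // cyclic inheritance
  }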
@@ -1109,7 +1149,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
- if (name_type == NameSectionKindCode::kModule) {
+ if (name_type == NameSectionKindCode::kModuleCode) {
WireBytesRef name = consume_string(&inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) {
module_->name = name;
@@ -1787,6 +1827,15 @@ class ModuleDecoderImpl : public Decoder {
return result;
}
+ HeapType consume_super_type() {
+ uint32_t type_length;
+ HeapType result = value_type_reader::read_heap_type<kFullValidation>(
+ this, this->pc(), &type_length, module_.get(),
+ origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
+ consume_bytes(type_length, "supertype");
+ return result;
+ }
+
ValueType consume_storage_type() {
uint8_t opcode = read_u8<kFullValidation>(this->pc());
switch (opcode) {
@@ -2363,7 +2412,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
uint32_t name_payload_len = decoder.consume_u32v("name payload length");
if (!decoder.checkAvailable(name_payload_len)) break;
- if (name_type != NameSectionKindCode::kFunction) {
+ if (name_type != NameSectionKindCode::kFunctionCode) {
decoder.consume_bytes(name_payload_len, "name subsection payload");
continue;
}
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 1040f77ecd..4eb13352d8 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -194,6 +194,41 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
return map;
}
+void CreateMapForType(Isolate* isolate, const WasmModule* module,
+ int type_index, Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> maps) {
+ // Recursive calls for supertypes may already have created this map.
+ if (maps->get(type_index).IsMap()) return;
+ Handle<Map> rtt_parent;
+ // If the type with {type_index} has an explicit supertype, make sure the
+ // map for that supertype is created first, so that the supertypes list
+ // that's cached on every RTT can be set up correctly.
+ uint32_t supertype = module->supertype(type_index);
+ if (supertype != kNoSuperType && supertype != kGenericSuperType) {
+ // This recursion is safe, because kV8MaxRttSubtypingDepth limits the
+ // number of recursive steps, so we won't overflow the stack.
+ CreateMapForType(isolate, module, supertype, instance, maps);
+ rtt_parent = handle(Map::cast(maps->get(supertype)), isolate);
+ }
+ Handle<Map> map;
+ switch (module->type_kinds[type_index]) {
+ case kWasmStructTypeCode:
+ map = CreateStructMap(isolate, module, type_index, rtt_parent, instance);
+ break;
+ case kWasmArrayTypeCode:
+ map = CreateArrayMap(isolate, module, type_index, rtt_parent, instance);
+ break;
+ case kWasmFunctionTypeCode:
+ // TODO(7748): Think about canonicalizing rtts to make them work for
+ // identical function types.
+ map = Map::Copy(isolate, isolate->wasm_exported_function_map(),
+ "fresh function map for function type canonical rtt "
+ "initialization");
+ break;
+ }
+ maps->set(type_index, *map);
+}
+
namespace {
// TODO(7748): Consider storing this array in Maps'
@@ -618,9 +653,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
for (int i = module_->num_imported_tables; i < table_count; i++) {
const WasmTable& table = module_->tables[i];
+ // Initialize tables with null for now. We will initialize non-defaultable
+ // tables later, in {InitializeIndirectFunctionTables}.
Handle<WasmTableObject> table_obj = WasmTableObject::New(
isolate_, instance, table.type, table.initial_size,
- table.has_maximum_size, table.maximum_size, nullptr);
+ table.has_maximum_size, table.maximum_size, nullptr,
+ isolate_->factory()->null_value());
tables->set(i, *table_obj);
}
instance->set_tables(*tables);
@@ -661,28 +699,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (enabled_.has_gc()) {
Handle<FixedArray> maps = isolate_->factory()->NewFixedArray(
static_cast<int>(module_->type_kinds.size()));
- for (int map_index = 0;
- map_index < static_cast<int>(module_->type_kinds.size());
- map_index++) {
- Handle<Map> map;
- switch (module_->type_kinds[map_index]) {
- case kWasmStructTypeCode:
- map = CreateStructMap(isolate_, module_, map_index, Handle<Map>(),
- instance);
- break;
- case kWasmArrayTypeCode:
- map = CreateArrayMap(isolate_, module_, map_index, Handle<Map>(),
- instance);
- break;
- case kWasmFunctionTypeCode:
- // TODO(7748): Think about canonicalizing rtts to make them work for
- // identical function types.
- map = Map::Copy(isolate_, isolate_->wasm_exported_function_map(),
- "fresh function map for function type canonical rtt "
- "initialization");
- break;
- }
- maps->set(map_index, *map);
+ for (uint32_t index = 0; index < module_->type_kinds.size(); index++) {
+ CreateMapForType(isolate_, module_, index, instance, maps);
}
instance->set_managed_object_maps(*maps);
}
@@ -830,6 +848,39 @@ MaybeHandle<Object> InstanceBuilder::LookupImport(uint32_t index,
return result;
}
+namespace {
+bool HasDefaultToNumberBehaviour(Isolate* isolate,
+ Handle<JSFunction> function) {
+ // Disallow providing a [Symbol.toPrimitive] member.
+ LookupIterator to_primitive_it{isolate, function,
+ isolate->factory()->to_primitive_symbol()};
+ if (to_primitive_it.state() != LookupIterator::NOT_FOUND) return false;
+
+ // The {valueOf} member must be the default "ObjectPrototypeValueOf".
+ LookupIterator value_of_it{isolate, function,
+ isolate->factory()->valueOf_string()};
+ if (value_of_it.state() != LookupIterator::DATA) return false;
+ Handle<Object> value_of = value_of_it.GetDataValue();
+ if (!value_of->IsJSFunction()) return false;
+ Builtin value_of_builtin_id =
+ Handle<JSFunction>::cast(value_of)->code().builtin_id();
+ if (value_of_builtin_id != Builtin::kObjectPrototypeValueOf) return false;
+
+ // The {toString} member must be the default "FunctionPrototypeToString".
+ LookupIterator to_string_it{isolate, function,
+ isolate->factory()->toString_string()};
+ if (to_string_it.state() != LookupIterator::DATA) return false;
+ Handle<Object> to_string = to_string_it.GetDataValue();
+ if (!to_string->IsJSFunction()) return false;
+ Builtin to_string_builtin_id =
+ Handle<JSFunction>::cast(to_string)->code().builtin_id();
+ if (to_string_builtin_id != Builtin::kFunctionPrototypeToString) return false;
+
+  // Just a default function, which will convert to "NaN". Accept this.
+ return true;
+}
+} // namespace
+
// Look up an import value in the {ffi_} object specifically for linking an
// asm.js module. This only performs non-observable lookups, which allows
// falling back to JavaScript proper (and hence re-executing all lookups) if
@@ -844,7 +895,6 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
// Perform lookup of the given {import_name} without causing any observable
// side-effect. We only accept accesses that resolve to data properties,
// which is indicated by the asm.js spec in section 7 ("Linking") as well.
- Handle<Object> result;
PropertyKey key(isolate_, Handle<Name>::cast(import_name));
LookupIterator it(isolate_, ffi_.ToHandleChecked(), key);
switch (it.state()) {
@@ -858,14 +908,23 @@ MaybeHandle<Object> InstanceBuilder::LookupImportAsm(
case LookupIterator::NOT_FOUND:
// Accepting missing properties as undefined does not cause any
// observable difference from JavaScript semantics, we are lenient.
- result = isolate_->factory()->undefined_value();
- break;
- case LookupIterator::DATA:
- result = it.GetDataValue();
- break;
+ return isolate_->factory()->undefined_value();
+ case LookupIterator::DATA: {
+ Handle<Object> value = it.GetDataValue();
+ // For legacy reasons, we accept functions for imported globals (see
+ // {ProcessImportedGlobal}), but only if we can easily determine that
+ // their Number-conversion is side effect free and returns NaN (which is
+ // the case as long as "valueOf" (or others) are not overwritten).
+ if (value->IsJSFunction() &&
+ module_->import_table[index].kind == kExternalGlobal &&
+ !HasDefaultToNumberBehaviour(isolate_,
+ Handle<JSFunction>::cast(value))) {
+ return ReportLinkError("function has special ToNumber behaviour", index,
+ import_name);
+ }
+ return value;
+ }
}
-
- return result;
}
// Load data segments into the memory.
@@ -1341,9 +1400,9 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
// Accepting {JSFunction} on top of just primitive values here is a
// workaround to support legacy asm.js code with broken binding. Note
// that using {NaN} (or Smi::zero()) here is what using the observable
- // conversion via {ToPrimitive} would produce as well.
- // TODO(wasm): Still observable if Function.prototype.valueOf or friends
- // are patched, we might need to check for that as well.
+ // conversion via {ToPrimitive} would produce as well. {LookupImportAsm}
+ // checked via {HasDefaultToNumberBehaviour} that "valueOf" or friends have
+ // not been patched.
if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
if (value->IsPrimitive()) {
MaybeHandle<Object> converted = global.type == kWasmI32
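A simplified stand-alone sketch of the invariant CreateMapForType above establishes: a type's RTT map is created only after the map of its explicit supertype, using bounded recursion over the supertype links (kV8MaxRttSubtypingDepth bounds the depth, so the stack cannot overflow). Names are illustrative:

  #include <functional>
  #include <vector>

  void CreateInSupertypeOrder(const std::vector<int>& supertype,  // -1 == none
                              std::vector<bool>& created, int index,
                              const std::function<void(int)>& create_map) {
    if (created[index]) return;  // a recursive call may already have handled it
    if (supertype[index] >= 0) {
      CreateInSupertypeOrder(supertype, created, supertype[index], create_map);
    }
    create_map(index);           // the supertype's map (if any) exists by now
    created[index] = true;
  }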
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 22bc7d259a..0a4a2207ff 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -312,33 +312,29 @@ void AsyncStreamingDecoder::Abort() {
namespace {
-class TopTierCompiledCallback {
+class CompilationChunkFinishedCallback {
public:
- TopTierCompiledCallback(
+ CompilationChunkFinishedCallback(
std::weak_ptr<NativeModule> native_module,
AsyncStreamingDecoder::ModuleCompiledCallback callback)
: native_module_(std::move(native_module)),
callback_(std::move(callback)) {}
void operator()(CompilationEvent event) const {
- if (event != CompilationEvent::kFinishedTopTierCompilation) return;
+ if (event != CompilationEvent::kFinishedCompilationChunk &&
+ event != CompilationEvent::kFinishedTopTierCompilation) {
+ return;
+ }
// If the native module is still alive, get back a shared ptr and call the
// callback.
if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
callback_(native_module);
}
-#ifdef DEBUG
- DCHECK(!called_);
- called_ = true;
-#endif
}
private:
const std::weak_ptr<NativeModule> native_module_;
const AsyncStreamingDecoder::ModuleCompiledCallback callback_;
-#ifdef DEBUG
- mutable bool called_ = false;
-#endif
};
} // namespace
@@ -347,7 +343,7 @@ void AsyncStreamingDecoder::NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) {
if (!module_compiled_callback_) return;
auto* comp_state = native_module->compilation_state();
- comp_state->AddCallback(TopTierCompiledCallback{
+ comp_state->AddCallback(CompilationChunkFinishedCallback{
std::move(native_module), std::move(module_compiled_callback_)});
module_compiled_callback_ = {};
}
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index c12496759f..29482d007b 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -284,8 +284,8 @@ constexpr bool is_defaultable(ValueKind kind) {
// representation (for reference types), and an inheritance depth (for rtts
// only). Those are encoded into 32 bits using base::BitField. The underlying
// ValueKind enumeration includes four elements which do not strictly correspond
-// to value types: the two packed types i8 and i16, the type of void blocks
-// (stmt), and a bottom value (for internal use).
+// to value types: the two packed types i8 and i16, the void type (for control
+// structures), and a bottom value (for internal use).
class ValueType {
public:
/******************************* Constructors *******************************/
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 0c8a570c71..27687f6e1d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -191,7 +191,7 @@ std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!has_trap_handler_index());
- if (kind() != WasmCode::kFunction) return;
+ if (kind() != WasmCode::kWasmFunction) return;
if (protected_instructions_size_ == 0) return;
Address base = instruction_start();
@@ -217,40 +217,23 @@ bool WasmCode::ShouldBeLogged(Isolate* isolate) {
isolate->is_profiling();
}
-void WasmCode::LogCode(Isolate* isolate, const char* source_url,
- int script_id) const {
- DCHECK(ShouldBeLogged(isolate));
- if (IsAnonymous()) return;
+std::string WasmCode::DebugName() const {
+ if (IsAnonymous()) {
+ return "anonymous function";
+ }
- ModuleWireBytes wire_bytes(native_module_->wire_bytes());
- const WasmModule* module = native_module_->module();
+ ModuleWireBytes wire_bytes(native_module()->wire_bytes());
+ const WasmModule* module = native_module()->module();
WireBytesRef name_ref =
module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
WasmName name = wire_bytes.GetNameOrNull(name_ref);
-
- const WasmDebugSymbols& debug_symbols = module->debug_symbols;
- auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
- auto source_map = native_module_->GetWasmSourceMap();
- if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
- !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
- WasmName external_url =
- wire_bytes.GetNameOrNull(debug_symbols.external_url);
- std::string external_url_string(external_url.data(), external_url.size());
- HandleScope scope(isolate);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- Local<v8::String> source_map_str =
- load_wasm_source_map(v8_isolate, external_url_string.c_str());
- native_module_->SetWasmSourceMap(
- std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
- }
-
std::string name_buffer;
if (kind() == kWasmToJsWrapper) {
name_buffer = "wasm-to-js:";
size_t prefix_len = name_buffer.size();
constexpr size_t kMaxSigLength = 128;
name_buffer.resize(prefix_len + kMaxSigLength);
- const FunctionSig* sig = module->functions[index_].sig;
+ const FunctionSig* sig = module->functions[index()].sig;
size_t sig_length = PrintSignature(
base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
name_buffer.resize(prefix_len + sig_length);
@@ -259,13 +242,41 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
name_buffer += '-';
name_buffer.append(name.begin(), name.size());
}
- name = base::VectorOf(name_buffer);
} else if (name.empty()) {
name_buffer.resize(32);
name_buffer.resize(
SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
"wasm-function[%d]", index()));
- name = base::VectorOf(name_buffer);
+ } else {
+ name_buffer.append(name.begin(), name.end());
+ }
+ return name_buffer;
+}
+
+void WasmCode::LogCode(Isolate* isolate, const char* source_url,
+ int script_id) const {
+ DCHECK(ShouldBeLogged(isolate));
+ if (IsAnonymous()) return;
+
+ ModuleWireBytes wire_bytes(native_module_->wire_bytes());
+ const WasmModule* module = native_module_->module();
+ std::string fn_name = DebugName();
+ WasmName name = base::VectorOf(fn_name);
+
+ const WasmDebugSymbols& debug_symbols = module->debug_symbols;
+ auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
+ auto source_map = native_module_->GetWasmSourceMap();
+ if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
+ !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
+ WasmName external_url =
+ wire_bytes.GetNameOrNull(debug_symbols.external_url);
+ std::string external_url_string(external_url.data(), external_url.size());
+ HandleScope scope(isolate);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ Local<v8::String> source_map_str =
+ load_wasm_source_map(v8_isolate, external_url_string.c_str());
+ native_module_->SetWasmSourceMap(
+ std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
}
// Record source positions before adding code, otherwise when code is added,
@@ -334,15 +345,16 @@ void WasmCode::Validate() const {
#endif
}
-void WasmCode::MaybePrint(const char* name) const {
+void WasmCode::MaybePrint() const {
// Determines whether flags want this code to be printed.
bool function_index_matches =
(!IsAnonymous() &&
FLAG_print_wasm_code_function_index == static_cast<int>(index()));
- if (FLAG_print_code ||
- (kind() == kFunction ? (FLAG_print_wasm_code || function_index_matches)
- : FLAG_print_wasm_stub_code)) {
- Print(name);
+ if (FLAG_print_code || (kind() == kWasmFunction
+ ? (FLAG_print_wasm_code || function_index_matches)
+ : FLAG_print_wasm_stub_code)) {
+ std::string name = DebugName();
+ Print(name.c_str());
}
}
@@ -364,7 +376,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (name) os << "name: " << name << "\n";
if (!IsAnonymous()) os << "index: " << index() << "\n";
os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
- if (kind() == kFunction) {
+ if (kind() == kWasmFunction) {
DCHECK(is_liftoff() || tier() == ExecutionTier::kTurbofan);
const char* compiler =
is_liftoff() ? (for_debugging() ? "Liftoff (debug)" : "Liftoff")
@@ -438,8 +450,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << " registers: ";
uint32_t register_bits = entry.register_bits();
int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
- for (int i = bits - 1; i >= 0; --i) {
- os << ((register_bits >> i) & 1);
+ for (int j = bits - 1; j >= 0; --j) {
+ os << ((register_bits >> j) & 1);
}
}
os << "\n";
@@ -458,7 +470,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
switch (kind) {
- case WasmCode::kFunction:
+ case WasmCode::kWasmFunction:
return "wasm function";
case WasmCode::kWasmToCapiWrapper:
return "wasm-to-capi";
@@ -958,6 +970,7 @@ BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
} // namespace
NativeModule::NativeModule(const WasmFeatures& enabled,
+ DynamicTiering dynamic_tiering,
VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
@@ -976,8 +989,8 @@ NativeModule::NativeModule(const WasmFeatures& enabled,
DCHECK_NOT_NULL(shared_this);
DCHECK_NULL(*shared_this);
shared_this->reset(this);
- compilation_state_ =
- CompilationState::New(*shared_this, std::move(async_counters));
+ compilation_state_ = CompilationState::New(
+ *shared_this, std::move(async_counters), dynamic_tiering);
compilation_state_->InitCompileJob();
DCHECK_NOT_NULL(module_);
if (module_->num_declared_functions > 0) {
@@ -1043,8 +1056,8 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
}
CompilationEnv NativeModule::CreateCompilationEnv() const {
- return {module(), bounds_checks_, kRuntimeExceptionSupport,
- enabled_features_};
+ return {module(), bounds_checks_, kRuntimeExceptionSupport, enabled_features_,
+ compilation_state()->dynamic_tiering()};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
@@ -1117,22 +1130,22 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
std::unique_ptr<WasmCode> new_code{
- new WasmCode{this, // native_module
- kAnonymousFuncIndex, // index
- dst_code_bytes, // instructions
- stack_slots, // stack_slots
- 0, // tagged_parameter_slots
- safepoint_table_offset, // safepoint_table_offset
- handler_table_offset, // handler_table_offset
- constant_pool_offset, // constant_pool_offset
- code_comments_offset, // code_comments_offset
- instructions.length(), // unpadded_binary_size
- {}, // protected_instructions
- reloc_info.as_vector(), // reloc_info
- source_pos.as_vector(), // source positions
- WasmCode::kFunction, // kind
- ExecutionTier::kNone, // tier
- kNoDebugging}}; // for_debugging
+ new WasmCode{this, // native_module
+ kAnonymousFuncIndex, // index
+ dst_code_bytes, // instructions
+ stack_slots, // stack_slots
+ 0, // tagged_parameter_slots
+ safepoint_table_offset, // safepoint_table_offset
+ handler_table_offset, // handler_table_offset
+ constant_pool_offset, // constant_pool_offset
+ code_comments_offset, // code_comments_offset
+ instructions.length(), // unpadded_binary_size
+ {}, // protected_instructions
+ reloc_info.as_vector(), // reloc_info
+ source_pos.as_vector(), // source positions
+ WasmCode::kWasmFunction, // kind
+ ExecutionTier::kNone, // tier
+ kNoDebugging}}; // for_debugging
new_code->MaybePrint();
new_code->Validate();
@@ -1255,6 +1268,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
safepoint_table_offset, handler_table_offset, constant_pool_offset,
code_comments_offset, instr_size, protected_instructions_data, reloc_info,
source_position_table, kind, tier, for_debugging}};
+
code->MaybePrint();
code->Validate();
@@ -1291,7 +1305,7 @@ WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
case WasmCompilationResult::kWasmToJsWrapper:
return WasmCode::Kind::kWasmToJsWrapper;
case WasmCompilationResult::kFunction:
- return WasmCode::Kind::kFunction;
+ return WasmCode::Kind::kWasmFunction;
default:
UNREACHABLE();
}
@@ -1971,7 +1985,6 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK_GT(size, 0);
size_t allocate_page_size = page_allocator->AllocatePageSize();
size = RoundUp(size, allocate_page_size);
- if (!BackingStore::ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
// When we start exposing Wasm in jitless mode, then the jitless flag
@@ -1979,10 +1992,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK(!FLAG_jitless);
VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
VirtualMemory::kMapAsJittable);
- if (!mem.IsReserved()) {
- BackingStore::ReleaseReservation(size);
- return {};
- }
+ if (!mem.IsReserved()) return {};
TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
mem.end(), mem.size());
@@ -2115,6 +2125,11 @@ void WasmCodeManager::SetThreadWritable(bool writable) {
MemoryProtectionKeyPermission permissions =
writable ? kNoRestrictions : kDisableWrite;
+ // When switching to writable we should not already be writable. Otherwise
+ // this points at a problem with counting writers, or with wrong
+ // initialization (globally or per thread).
+ DCHECK_IMPLIES(writable, !MemoryProtectionKeyWritable());
+
TRACE_HEAP("Setting memory protection key %d to writable: %d.\n",
memory_protection_key_, writable);
SetPermissionsForMemoryProtectionKey(memory_protection_key_, permissions);
@@ -2124,6 +2139,10 @@ bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
return memory_protection_key_ != kNoMemoryProtectionKey;
}
+bool WasmCodeManager::MemoryProtectionKeyWritable() const {
+ return wasm::MemoryProtectionKeyWritable(memory_protection_key_);
+}
+
void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
if (memory_protection_key_ == kNoMemoryProtectionKey) {
memory_protection_key_ = AllocateMemoryProtectionKey();
@@ -2183,8 +2202,11 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size_t size = code_space.size();
Address end = code_space.end();
std::shared_ptr<NativeModule> ret;
- new NativeModule(enabled, std::move(code_space), std::move(module),
- isolate->async_counters(), &ret);
+ DynamicTiering dynamic_tiering = isolate->IsWasmDynamicTieringEnabled()
+ ? DynamicTiering::kEnabled
+ : DynamicTiering::kDisabled;
+ new NativeModule(enabled, dynamic_tiering, std::move(code_space),
+ std::move(module), isolate->async_counters(), &ret);
// The constructor initialized the shared_ptr.
DCHECK_NOT_NULL(ret);
TRACE_HEAP("New NativeModule %p: Mem: 0x%" PRIxPTR ",+%zu\n", ret.get(),
@@ -2414,7 +2436,6 @@ void WasmCodeManager::FreeNativeModule(
#endif // V8_OS_WIN64
lookup_map_.erase(code_space.address());
- BackingStore::ReleaseReservation(code_space.size());
code_space.Free();
DCHECK(!code_space.IsReserved());
}
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 70ef6d75a9..ad7e4ab26b 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -156,12 +156,7 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
class V8_EXPORT_PRIVATE WasmCode final {
public:
- enum Kind {
- kFunction,
- kWasmToCapiWrapper,
- kWasmToJsWrapper,
- kJumpTable
- };
+ enum Kind { kWasmFunction, kWasmToCapiWrapper, kWasmToJsWrapper, kJumpTable };
// Each runtime stub is identified by an id. This id is used to reference the
// stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
@@ -318,7 +313,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
void Validate() const;
void Print(const char* name = nullptr) const;
- void MaybePrint(const char* name = nullptr) const;
+ void MaybePrint() const;
void Disassemble(const char* name, std::ostream& os,
Address current_pc = kNullAddress) const;
@@ -420,6 +415,10 @@ class V8_EXPORT_PRIVATE WasmCode final {
std::unique_ptr<const byte[]> ConcatenateBytes(
std::initializer_list<base::Vector<const byte>>);
+  // Tries to get a reasonable name. Lazily looks up the name section, and
+  // falls back to the function index. The return value is guaranteed not to
+  // be empty.
+ std::string DebugName() const;
+
// Code objects that have been registered with the global trap handler within
// this process, will have a {trap_handler_index} associated with them.
int trap_handler_index() const {
@@ -729,7 +728,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
void LogWasmCodes(Isolate*, Script);
- CompilationState* compilation_state() { return compilation_state_.get(); }
+ CompilationState* compilation_state() const {
+ return compilation_state_.get();
+ }
// Create a {CompilationEnv} object for compilation. The caller has to ensure
// that the {WasmModule} pointer stays valid while the {CompilationEnv} is
@@ -849,7 +850,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
};
// Private constructor, called via {WasmCodeManager::NewNativeModule()}.
- NativeModule(const WasmFeatures& enabled_features, VirtualMemory code_space,
+ NativeModule(const WasmFeatures& enabled_features,
+ DynamicTiering dynamic_tiering, VirtualMemory code_space,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this);
@@ -1038,6 +1040,11 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Returns true if there is PKU support, false otherwise.
bool HasMemoryProtectionKeySupport() const;
+ // Returns {true} if the memory protection key is write-enabled for the
+ // current thread.
+ // Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
+ bool MemoryProtectionKeyWritable() const;
+
// This allocates a memory protection key (if none was allocated before),
// independent of the --wasm-memory-protection-keys flag.
void InitializeMemoryProtectionKeyForTesting();
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 726ceaa018..5bb12bc863 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -50,9 +50,9 @@ enum ValueTypeCode : uint8_t {
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
constexpr uint8_t kWasmStructTypeCode = 0x5f;
constexpr uint8_t kWasmArrayTypeCode = 0x5e;
-constexpr uint8_t kWasmFunctionExtendingTypeCode = 0x5d;
-constexpr uint8_t kWasmStructExtendingTypeCode = 0x5c;
-constexpr uint8_t kWasmArrayExtendingTypeCode = 0x5b;
+constexpr uint8_t kWasmFunctionSubtypeCode = 0x5d;
+constexpr uint8_t kWasmStructSubtypeCode = 0x5c;
+constexpr uint8_t kWasmArraySubtypeCode = 0x5b;
// Binary encoding of import/export kinds.
enum ImportExportKindCode : uint8_t {
@@ -118,19 +118,19 @@ constexpr uint8_t kNoCompilationHint = kMaxUInt8;
// Binary encoding of name section kinds.
enum NameSectionKindCode : uint8_t {
- kModule = 0,
- kFunction = 1,
- kLocal = 2,
+ kModuleCode = 0,
+ kFunctionCode = 1,
+ kLocalCode = 2,
// https://github.com/WebAssembly/extended-name-section/
- kLabel = 3,
- kType = 4,
- kTable = 5,
- kMemory = 6,
- kGlobal = 7,
- kElementSegment = 8,
- kDataSegment = 9,
+ kLabelCode = 3,
+ kTypeCode = 4,
+ kTableCode = 5,
+ kMemoryCode = 6,
+ kGlobalCode = 7,
+ kElementSegmentCode = 8,
+ kDataSegmentCode = 9,
// https://github.com/WebAssembly/gc/issues/193
- kField = 10
+ kFieldCode = 10
};
constexpr size_t kWasmPageSize = 0x10000;
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 65f05ad507..a0ecab9596 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -194,7 +194,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!type_names_) {
type_names_ = std::make_unique<NameMap>(DecodeNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kType));
+ native_module_->wire_bytes(), NameSectionKindCode::kTypeCode));
}
return type_names_->GetName(type_index);
}
@@ -203,7 +203,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!local_names_) {
local_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kLocal));
+ native_module_->wire_bytes(), NameSectionKindCode::kLocalCode));
}
return local_names_->GetName(func_index, local_index);
}
@@ -212,7 +212,7 @@ class DebugInfoImpl {
base::MutexGuard guard(&mutex_);
if (!field_names_) {
field_names_ = std::make_unique<IndirectNameMap>(DecodeIndirectNameMap(
- native_module_->wire_bytes(), NameSectionKindCode::kField));
+ native_module_->wire_bytes(), NameSectionKindCode::kFieldCode));
}
return field_names_->GetName(struct_index, field_index);
}
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 6da33f1ab2..93c3aae68c 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -11,9 +11,11 @@
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
#include "src/execution/v8threads.h"
+#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
@@ -1034,10 +1036,10 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
for (auto* native_module : info->native_modules) {
DCHECK_EQ(1, native_modules_.count(native_module));
DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
- auto* info = native_modules_[native_module].get();
- info->isolates.erase(isolate);
+ auto* module = native_modules_[native_module].get();
+ module->isolates.erase(isolate);
if (current_gc_info_) {
- for (WasmCode* code : info->potentially_dead_code) {
+ for (WasmCode* code : module->potentially_dead_code) {
current_gc_info_->dead_code.erase(code);
}
}
@@ -1228,9 +1230,9 @@ void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
base::MutexGuard guard(&mutex_);
- auto it = native_modules_.find(native_module);
- DCHECK_NE(native_modules_.end(), it);
- for (Isolate* isolate : it->second->isolates) {
+ auto module = native_modules_.find(native_module);
+ DCHECK_NE(native_modules_.end(), module);
+ for (Isolate* isolate : module->second->isolates) {
DCHECK_EQ(1, isolates_.count(isolate));
IsolateInfo* info = isolates_[isolate].get();
DCHECK_EQ(1, info->native_modules.count(native_module));
@@ -1274,7 +1276,7 @@ void WasmEngine::FreeNativeModule(NativeModule* native_module) {
native_module, current_gc_info_->dead_code.size());
}
native_module_cache_.Erase(native_module);
- native_modules_.erase(it);
+ native_modules_.erase(module);
}
namespace {
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 6fc3278141..0d8c14a641 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -548,6 +548,7 @@ inline void* ArrayElementAddress(WasmArray array, uint32_t index,
void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
uint32_t dst_index, Address raw_src_array,
uint32_t src_index, uint32_t length) {
+ DCHECK_GT(length, 0);
ThreadNotInWasmScope thread_not_in_wasm_scope;
DisallowGarbageCollection no_gc;
WasmArray dst_array = WasmArray::cast(Object(raw_dst_array));
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 3365e109fb..24d4d35bec 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -111,6 +111,7 @@ int32_t memory_copy_wrapper(Address data);
// zero-extend the result in the return register.
int32_t memory_fill_wrapper(Address data);
+// Assumes copy ranges are in-bounds and length > 0.
void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
uint32_t dst_index, Address raw_src_array,
uint32_t src_index, uint32_t length);
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index ac8e8e16d7..cf9ef00bf8 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -55,7 +55,7 @@
\
/* Stack Switching proposal. */ \
/* https://github.com/WebAssembly/stack-switching */ \
- /* V8 side owner: thibaudm & fgm */ \
+ /* V8 side owner: thibaudm, fgm */ \
V(stack_switching, "stack switching", false)
// #############################################################################
@@ -67,12 +67,6 @@
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Reference Types, a.k.a. reftypes proposal. */ \
- /* https://github.com/WebAssembly/reference-types */ \
- /* V8 side owner: ahaas */ \
- /* Staged in v7.8. */ \
- V(reftypes, "reference type opcodes", false) \
- \
/* Tail call / return call proposal. */ \
/* https://github.com/webassembly/tail-call */ \
/* V8 side owner: thibaudm */ \
@@ -96,6 +90,13 @@
/* Shipped in v9.1 * */ \
V(simd, "SIMD opcodes", true) \
\
+ /* Reference Types, a.k.a. reftypes proposal. */ \
+ /* https://github.com/WebAssembly/reference-types */ \
+ /* V8 side owner: ahaas */ \
+ /* Staged in v7.8. */ \
+ /* Shipped in v9.6 * */ \
+ V(reftypes, "reference type opcodes", true) \
+ \
/* Threads proposal. */ \
/* https://github.com/webassembly/threads */ \
/* NOTE: This is enabled via chromium flag on desktop systems since v7.4, */ \
diff --git a/deps/v8/src/wasm/wasm-init-expr.cc b/deps/v8/src/wasm/wasm-init-expr.cc
index 14a7e3b6a6..c6641034ba 100644
--- a/deps/v8/src/wasm/wasm-init-expr.cc
+++ b/deps/v8/src/wasm/wasm-init-expr.cc
@@ -39,7 +39,11 @@ ValueType WasmInitExpr::type(const WasmModule* module,
case kRefNullConst:
return ValueType::Ref(immediate().heap_type, kNullable);
case kStructNewWithRtt:
+ case kStructNew:
+ case kStructNewDefaultWithRtt:
+ case kStructNewDefault:
case kArrayInit:
+ case kArrayInitStatic:
return ValueType::Ref(immediate().index, kNonNullable);
case kRttCanon:
return ValueType::Rtt(immediate().heap_type, 0);
diff --git a/deps/v8/src/wasm/wasm-init-expr.h b/deps/v8/src/wasm/wasm-init-expr.h
index bf68265b2a..551fce2991 100644
--- a/deps/v8/src/wasm/wasm-init-expr.h
+++ b/deps/v8/src/wasm/wasm-init-expr.h
@@ -34,7 +34,11 @@ class WasmInitExpr {
kRefNullConst,
kRefFuncConst,
kStructNewWithRtt,
+ kStructNew,
+ kStructNewDefaultWithRtt,
+ kStructNewDefault,
kArrayInit,
+ kArrayInitStatic,
kRttCanon,
kRttSub,
kRttFreshSub,
@@ -99,6 +103,31 @@ class WasmInitExpr {
return expr;
}
+ static WasmInitExpr StructNew(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNew;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
+ static WasmInitExpr StructNewDefaultWithRtt(uint32_t index,
+ WasmInitExpr rtt) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNewDefaultWithRtt;
+ expr.immediate_.index = index;
+ expr.operands_.push_back(std::move(rtt));
+ return expr;
+ }
+
+ static WasmInitExpr StructNewDefault(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kStructNewDefault;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
static WasmInitExpr ArrayInit(uint32_t index,
std::vector<WasmInitExpr> elements) {
WasmInitExpr expr;
@@ -108,6 +137,15 @@ class WasmInitExpr {
return expr;
}
+ static WasmInitExpr ArrayInitStatic(uint32_t index,
+ std::vector<WasmInitExpr> elements) {
+ WasmInitExpr expr;
+ expr.kind_ = kArrayInitStatic;
+ expr.immediate_.index = index;
+ expr.operands_ = std::move(elements);
+ return expr;
+ }
+
static WasmInitExpr RttCanon(uint32_t index) {
WasmInitExpr expr;
expr.kind_ = kRttCanon;
@@ -157,6 +195,9 @@ class WasmInitExpr {
case kRefNullConst:
return immediate().heap_type == other.immediate().heap_type;
case kStructNewWithRtt:
+ case kStructNew:
+ case kStructNewDefaultWithRtt:
+ case kStructNewDefault:
if (immediate().index != other.immediate().index) return false;
DCHECK_EQ(operands().size(), other.operands().size());
for (uint32_t i = 0; i < operands().size(); i++) {
@@ -164,6 +205,7 @@ class WasmInitExpr {
}
return true;
case kArrayInit:
+ case kArrayInitStatic:
if (immediate().index != other.immediate().index) return false;
if (operands().size() != other.operands().size()) return false;
for (uint32_t i = 0; i < operands().size(); i++) {
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index ef514c3b4c..9f08f33be7 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -19,12 +19,14 @@
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
+#include "src/handles/global-handles-inl.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
#include "src/objects/fixed-array.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
@@ -184,9 +186,6 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
}
GET_FIRST_ARGUMENT_AS(Module)
-GET_FIRST_ARGUMENT_AS(Memory)
-GET_FIRST_ARGUMENT_AS(Table)
-GET_FIRST_ARGUMENT_AS(Global)
GET_FIRST_ARGUMENT_AS(Tag)
#undef GET_FIRST_ARGUMENT_AS
@@ -654,6 +653,25 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(Boolean::New(isolate, validated));
}
+namespace {
+bool TransferPrototype(i::Isolate* isolate, i::Handle<i::JSObject> destination,
+ i::Handle<i::JSReceiver> source) {
+ i::MaybeHandle<i::HeapObject> maybe_prototype =
+ i::JSObject::GetPrototype(isolate, source);
+ i::Handle<i::HeapObject> prototype;
+ if (maybe_prototype.ToHandle(&prototype)) {
+ Maybe<bool> result = i::JSObject::SetPrototype(destination, prototype,
+ /*from_javascript=*/false,
+ internal::kThrowOnError);
+ if (!result.FromJust()) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+ }
+ return true;
+}
+} // namespace
+
// new WebAssembly.Module(bytes) -> WebAssembly.Module
void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -679,25 +697,38 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- i::MaybeHandle<i::Object> module_obj;
+ i::MaybeHandle<i::WasmModuleObject> maybe_module_obj;
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj = i::wasm::GetWasmEngine()->SyncCompile(
+ maybe_module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj = i::wasm::GetWasmEngine()->SyncCompile(
+ maybe_module_obj = i::wasm::GetWasmEngine()->SyncCompile(
i_isolate, enabled_features, &thrower, bytes);
}
- if (module_obj.is_null()) return;
+ i::Handle<i::WasmModuleObject> module_obj;
+ if (!maybe_module_obj.ToHandle(&module_obj)) return;
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {module_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Module} directly, but some
+ // subclass: {module_obj} has {WebAssembly.Module}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, module_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
+ return_value.Set(Utils::ToLocal(i::Handle<i::JSObject>::cast(module_obj)));
}
// WebAssembly.Module.imports(module) -> Array<Import>
@@ -754,37 +785,6 @@ void WebAssemblyModuleCustomSections(
args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
}
-MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
- Local<Value> module,
- Local<Value> ffi) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
- i::MaybeHandle<i::Object> instance_object;
- {
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
-
- // TODO(ahaas): These checks on the module should not be necessary here They
- // are just a workaround for https://crbug.com/837417.
- i::Handle<i::Object> module_obj = Utils::OpenHandle(*module);
- if (!module_obj->IsWasmModuleObject()) {
- thrower.TypeError("Argument 0 must be a WebAssembly.Module object");
- return {};
- }
-
- i::MaybeHandle<i::JSReceiver> maybe_imports =
- GetValueAsImports(ffi, &thrower);
- if (thrower.error()) return {};
-
- instance_object = i::wasm::GetWasmEngine()->SyncInstantiate(
- i_isolate, &thrower, i::Handle<i::WasmModuleObject>::cast(module_obj),
- maybe_imports, i::MaybeHandle<i::JSArrayBuffer>());
- }
-
- DCHECK_EQ(instance_object.is_null(), i_isolate->has_scheduled_exception());
- if (instance_object.is_null()) return {};
- return Utils::ToLocal(instance_object.ToHandleChecked());
-}
-
// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -795,23 +795,48 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
if (i_isolate->wasm_instance_callback()(args)) return;
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
- if (!args.IsConstructCall()) {
- thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
- return;
- }
+ i::MaybeHandle<i::JSObject> maybe_instance_obj;
+ {
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Instance must be invoked with 'new'");
+ return;
+ }
- GetFirstArgumentAsModule(args, &thrower);
- if (thrower.error()) return;
+ i::MaybeHandle<i::WasmModuleObject> maybe_module =
+ GetFirstArgumentAsModule(args, &thrower);
+ if (thrower.error()) return;
- // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
- // We'll check for that in WebAssemblyInstantiateImpl.
- Local<Value> data = args[1];
+ i::Handle<i::WasmModuleObject> module_obj = maybe_module.ToHandleChecked();
+
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(args[1], &thrower);
+ if (thrower.error()) return;
+
+ maybe_instance_obj = i::wasm::GetWasmEngine()->SyncInstantiate(
+ i_isolate, &thrower, module_obj, maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
+ }
- Local<Value> instance;
- if (WebAssemblyInstantiateImpl(isolate, args[0], data).ToLocal(&instance)) {
- args.GetReturnValue().Set(instance);
+ i::Handle<i::JSObject> instance_obj;
+ if (!maybe_instance_obj.ToHandle(&instance_obj)) {
+ DCHECK(i_isolate->has_scheduled_exception());
+ return;
+ }
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {instance_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Instance} directly, but some
+ // subclass: {instance_obj} has {WebAssembly.Instance}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, instance_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
}
+
+ args.GetReturnValue().Set(Utils::ToLocal(instance_obj));
}
// WebAssembly.instantiateStreaming(Response | Promise<Response> [, imports])
@@ -1032,7 +1057,7 @@ bool GetOptionalIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
}
// Fetch 'initial' or 'minimum' property from object. If both are provided,
-// 'initial' is used.
+// a TypeError is thrown.
// TODO(aseemgarg): change behavior when the following bug is resolved:
// https://github.com/WebAssembly/js-types/issues/6
bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
@@ -1045,13 +1070,27 @@ bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
result, lower_bound, upper_bound)) {
return false;
}
- auto enabled_features = i::wasm::WasmFeatures::FromFlags();
- if (!has_initial && enabled_features.has_type_reflection()) {
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(
+ reinterpret_cast<i::Isolate*>(isolate));
+ if (enabled_features.has_type_reflection()) {
+ bool has_minimum = false;
+ int64_t minimum = 0;
if (!GetOptionalIntegerProperty(isolate, thrower, context, object,
- v8_str(isolate, "minimum"), &has_initial,
- result, lower_bound, upper_bound)) {
+ v8_str(isolate, "minimum"), &has_minimum,
+ &minimum, lower_bound, upper_bound)) {
+ return false;
+ }
+ if (has_initial && has_minimum) {
+ thrower->TypeError(
+ "The properties 'initial' and 'minimum' are not allowed at the same "
+ "time");
return false;
}
+ if (has_minimum) {
+ // Only {minimum} exists, so we use {minimum} as {initial}.
+ has_initial = true;
+ *result = minimum;
+ }
}
if (!has_initial) {
// TODO(aseemgarg): update error message when the spec issue is resolved.
@@ -1061,6 +1100,19 @@ bool GetInitialOrMinimumProperty(v8::Isolate* isolate, ErrorThrower* thrower,
return true;
}
+namespace {
+i::Handle<i::Object> DefaultReferenceValue(i::Isolate* isolate,
+ i::wasm::ValueType type) {
+ if (type == i::wasm::kWasmFuncRef) {
+ return isolate->factory()->null_value();
+ }
+ if (type.is_reference()) {
+ return isolate->factory()->undefined_value();
+ }
+ UNREACHABLE();
+}
+} // namespace
+
// new WebAssembly.Table(args) -> WebAssembly.Table
void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -1086,7 +1138,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!maybe.ToLocal(&value)) return;
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
- auto enabled_features = i::wasm::WasmFeatures::FromFlags();
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
// The JS api uses 'anyfunc' instead of 'funcref'.
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
type = i::wasm::kWasmFuncRef;
@@ -1120,7 +1172,20 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::WasmTableObject> table_obj =
i::WasmTableObject::New(i_isolate, i::Handle<i::WasmInstanceObject>(),
type, static_cast<uint32_t>(initial), has_maximum,
- static_cast<uint32_t>(maximum), &fixed_array);
+ static_cast<uint32_t>(maximum), &fixed_array,
+ DefaultReferenceValue(i_isolate, type));
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {table_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Table} directly, but some
+ // subclass: {table_obj} has {WebAssembly.Table}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, table_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
if (initial > 0 && args.Length() >= 2 && !args[1]->IsUndefined()) {
i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
@@ -1198,6 +1263,19 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.RangeError("could not allocate memory");
return;
}
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {memory_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Memory} directly, but some
+ // subclass: {memory_obj} has {WebAssembly.Memory}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, memory_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
if (shared == i::SharedFlag::kShared) {
i::Handle<i::JSArrayBuffer> buffer(
i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
@@ -1351,6 +1429,18 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {global_obj} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Global} directly, but some
+ // subclass: {global_obj} has {WebAssembly.Global}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, global_obj,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+
// Convert value to a WebAssembly value, the default value is 0.
Local<v8::Value> value = Local<Value>::Cast(args[1]);
switch (type.kind()) {
@@ -1835,16 +1925,16 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> init_value = i_isolate->factory()->null_value();
- auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
- if (enabled_features.has_typed_funcref()) {
- if (args.Length() >= 2 && !args[1]->IsUndefined()) {
- init_value = Utils::OpenHandle(*args[1]);
- }
+ i::Handle<i::Object> init_value;
+
+ if (args.Length() >= 2 && !args[1]->IsUndefined()) {
+ init_value = Utils::OpenHandle(*args[1]);
if (!i::WasmTableObject::IsValidElement(i_isolate, receiver, init_value)) {
thrower.TypeError("Argument 1 must be a valid type for the table");
return;
}
+ } else {
+ init_value = DefaultReferenceValue(i_isolate, receiver->type());
}
int old_size =
@@ -1902,7 +1992,12 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ i::Handle<i::Object> element;
+ if (args.Length() >= 2) {
+ element = Utils::OpenHandle(*args[1]);
+ } else {
+ element = DefaultReferenceValue(i_isolate, table_object->type());
+ }
if (!i::WasmTableObject::IsValidElement(i_isolate, table_object, element)) {
thrower.TypeError(
"Argument 1 must be null or a WebAssembly function of type compatible "
@@ -1912,16 +2007,14 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::WasmTableObject::Set(i_isolate, table_object, index, element);
}
-// WebAssembly.Table.type(WebAssembly.Table) -> TableType
+// WebAssembly.Table.type() -> TableType
void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Table.type()");
- auto maybe_table = GetFirstArgumentAsTable(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmTableObject> table = maybe_table.ToHandleChecked();
+ EXTRACT_THIS(table, WasmTableObject);
base::Optional<uint32_t> max_size;
if (!table->maximum_length().IsUndefined()) {
uint64_t max_size64 = table->maximum_length().Number();
@@ -1994,16 +2087,14 @@ void WebAssemblyMemoryGetBuffer(
return_value.Set(Utils::ToLocal(buffer));
}
-// WebAssembly.Memory.type(WebAssembly.Memory) -> MemoryType
+// WebAssembly.Memory.type() -> MemoryType
void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Memory.type()");
- auto maybe_memory = GetFirstArgumentAsMemory(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmMemoryObject> memory = maybe_memory.ToHandleChecked();
+ EXTRACT_THIS(memory, WasmMemoryObject);
i::Handle<i::JSArrayBuffer> buffer(memory->array_buffer(), i_isolate);
size_t curr_size = buffer->byte_length() / i::wasm::kWasmPageSize;
DCHECK_LE(curr_size, std::numeric_limits<uint32_t>::max());
@@ -2014,7 +2105,8 @@ void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
max_size.emplace(static_cast<uint32_t>(max_size64));
}
- auto type = i::wasm::GetTypeForMemory(i_isolate, min_size, max_size);
+ bool shared = buffer->is_shared();
+ auto type = i::wasm::GetTypeForMemory(i_isolate, min_size, max_size, shared);
args.GetReturnValue().Set(Utils::ToLocal(type));
}
@@ -2360,16 +2452,14 @@ void WebAssemblyGlobalSetValue(
}
}
-// WebAssembly.Global.type(WebAssembly.Global) -> GlobalType
+// WebAssembly.Global.type() -> GlobalType
void WebAssemblyGlobalType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Global.type()");
- auto maybe_global = GetFirstArgumentAsGlobal(args, &thrower);
- if (thrower.error()) return;
- i::Handle<i::WasmGlobalObject> global = maybe_global.ToHandleChecked();
+ EXTRACT_THIS(global, WasmGlobalObject);
auto type = i::wasm::GetTypeForGlobal(i_isolate, global->is_mutable(),
global->type());
args.GetReturnValue().Set(Utils::ToLocal(type));
@@ -2594,7 +2684,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, table_constructor, "type", WebAssemblyTableType, 1);
+ InstallFunc(isolate, table_proto, "type", WebAssemblyTableType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, table_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Table"), ro_attributes);
@@ -2614,7 +2705,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, memory_constructor, "type", WebAssemblyMemoryType, 1);
+ InstallFunc(isolate, memory_proto, "type", WebAssemblyMemoryType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
@@ -2636,7 +2728,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
if (enabled_features.has_type_reflection()) {
- InstallFunc(isolate, global_constructor, "type", WebAssemblyGlobalType, 1);
+ InstallFunc(isolate, global_proto, "type", WebAssemblyGlobalType, 0, false,
+ NONE, SideEffectType::kHasNoSideEffect);
}
JSObject::AddProperty(isolate, global_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
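Taken together, the wasm-js.cc changes above make the WebAssembly constructors honor subclass prototypes (via the new TransferPrototype helper) and, when type reflection is enabled, reject descriptors that carry both 'initial' and 'minimum'. A minimal TypeScript sketch of the expected JS-visible behavior — an assumption based on the diff, not part of it; it presumes a V8 build with type reflection enabled, and the class and variable names are illustrative only:

// Sketch: a subclass of WebAssembly.Memory should now keep its own prototype.
class GrowableMemory extends WebAssembly.Memory {
  // Only reachable if the constructor transfers the subclass prototype.
  pages(): number {
    return this.buffer.byteLength / 65536;
  }
}

const mem = new GrowableMemory({ initial: 1 });
console.log(mem instanceof GrowableMemory);                            // expected: true
console.log(Object.getPrototypeOf(mem) === GrowableMemory.prototype);  // expected: true
console.log(mem.pages());                                              // expected: 1

// With type reflection enabled, 'initial' and 'minimum' together should throw.
const both: any = { initial: 1, minimum: 1 };
try {
  new WebAssembly.Memory(both);
} catch (e) {
  console.log(e instanceof TypeError);                                 // expected: true
}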
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 756900c160..9bb3472138 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -290,15 +290,20 @@ void WasmModuleBuilder::AddDataSegment(const byte* data, uint32_t size,
}
}
-uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
- auto sig_entry = signature_map_.find(*sig);
- if (sig_entry != signature_map_.end()) return sig_entry->second;
+uint32_t WasmModuleBuilder::ForceAddSignature(FunctionSig* sig,
+ uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
signature_map_.emplace(*sig, index);
- types_.push_back(Type(sig));
+ types_.push_back(Type(sig, supertype));
return index;
}
+uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig, uint32_t supertype) {
+ auto sig_entry = signature_map_.find(*sig);
+ if (sig_entry != signature_map_.end()) return sig_entry->second;
+ return ForceAddSignature(sig, supertype);
+}
+
uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
DCHECK_EQ(0, type->return_count());
int type_index = AddSignature(type);
@@ -307,15 +312,16 @@ uint32_t WasmModuleBuilder::AddException(FunctionSig* type) {
return except_index;
}
-uint32_t WasmModuleBuilder::AddStructType(StructType* type) {
+uint32_t WasmModuleBuilder::AddStructType(StructType* type,
+ uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type));
+ types_.push_back(Type(type, supertype));
return index;
}
-uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
+uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type, uint32_t supertype) {
uint32_t index = static_cast<uint32_t>(types_.size());
- types_.push_back(Type(type));
+ types_.push_back(Type(type, supertype));
return index;
}
@@ -509,22 +515,49 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
}
break;
}
+ case WasmInitExpr::kStructNew:
case WasmInitExpr::kStructNewWithRtt:
+ case WasmInitExpr::kStructNewDefault:
+ case WasmInitExpr::kStructNewDefaultWithRtt:
+ STATIC_ASSERT((kExprStructNew >> 8) == kGCPrefix);
STATIC_ASSERT((kExprStructNewWithRtt >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprStructNewDefault >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprStructNewDefaultWithRtt >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(kExprStructNewWithRtt));
+ WasmOpcode opcode;
+ switch (init.kind()) {
+ case WasmInitExpr::kStructNewWithRtt:
+ opcode = kExprStructNewWithRtt;
+ break;
+ case WasmInitExpr::kStructNew:
+ opcode = kExprStructNew;
+ break;
+ case WasmInitExpr::kStructNewDefaultWithRtt:
+ opcode = kExprStructNewDefaultWithRtt;
+ break;
+ case WasmInitExpr::kStructNewDefault:
+ opcode = kExprStructNewDefault;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ buffer->write_u8(static_cast<uint8_t>(opcode));
buffer->write_u32v(init.immediate().index);
break;
case WasmInitExpr::kArrayInit:
+ case WasmInitExpr::kArrayInitStatic:
STATIC_ASSERT((kExprArrayInit >> 8) == kGCPrefix);
+ STATIC_ASSERT((kExprArrayInitStatic >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
- buffer->write_u8(static_cast<uint8_t>(kExprArrayInit));
+ buffer->write_u8(static_cast<uint8_t>(
+ init.kind() == WasmInitExpr::kArrayInit ? kExprArrayInit
+ : kExprArrayInitStatic));
buffer->write_u32v(init.immediate().index);
buffer->write_u32v(static_cast<uint32_t>(init.operands().size() - 1));
break;
@@ -568,10 +601,12 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_size(types_.size());
for (const Type& type : types_) {
+ bool has_super = type.supertype != kNoSuperType;
switch (type.kind) {
case Type::kFunctionSig: {
FunctionSig* sig = type.sig;
- buffer->write_u8(kWasmFunctionTypeCode);
+ buffer->write_u8(has_super ? kWasmFunctionSubtypeCode
+ : kWasmFunctionTypeCode);
buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
WriteValueType(buffer, param);
@@ -580,23 +615,40 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
for (auto ret : sig->returns()) {
WriteValueType(buffer, ret);
}
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kFuncRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
case Type::kStructType: {
StructType* struct_type = type.struct_type;
- buffer->write_u8(kWasmStructTypeCode);
+ buffer->write_u8(has_super ? kWasmStructSubtypeCode
+ : kWasmStructTypeCode);
buffer->write_size(struct_type->field_count());
for (uint32_t i = 0; i < struct_type->field_count(); i++) {
WriteValueType(buffer, struct_type->field(i));
buffer->write_u8(struct_type->mutability(i) ? 1 : 0);
}
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kDataRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
case Type::kArrayType: {
ArrayType* array_type = type.array_type;
- buffer->write_u8(kWasmArrayTypeCode);
+ buffer->write_u8(has_super ? kWasmArraySubtypeCode
+ : kWasmArrayTypeCode);
WriteValueType(buffer, array_type->element_type());
buffer->write_u8(array_type->mutability() ? 1 : 0);
+ if (type.supertype == kGenericSuperType) {
+ buffer->write_u8(kDataRefCode);
+ } else if (has_super) {
+ buffer->write_i32v(type.supertype);
+ }
break;
}
}
@@ -852,7 +904,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// Emit the section string.
buffer->write_string(base::CStrVector("name"));
// Emit a subsection for the function names.
- buffer->write_u8(NameSectionKindCode::kFunction);
+ buffer->write_u8(NameSectionKindCode::kFunctionCode);
// Emit a placeholder for the subsection length.
size_t functions_start = buffer->reserve_u32v();
// Emit the function names.
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 8eeac56afd..7ba140775d 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -331,10 +331,14 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// size, or the maximum uint32_t value if the maximum table size has been
// exceeded.
uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count);
- uint32_t AddSignature(FunctionSig* sig);
+ // Adds the signature to the module if it does not already exist.
+ uint32_t AddSignature(FunctionSig* sig, uint32_t supertype = kNoSuperType);
+ // Does not deduplicate function signatures.
+ uint32_t ForceAddSignature(FunctionSig* sig,
+ uint32_t supertype = kNoSuperType);
uint32_t AddException(FunctionSig* type);
- uint32_t AddStructType(StructType* type);
- uint32_t AddArrayType(ArrayType* type);
+ uint32_t AddStructType(StructType* type, uint32_t supertype = kNoSuperType);
+ uint32_t AddArrayType(ArrayType* type, uint32_t supertype = kNoSuperType);
uint32_t AddTable(ValueType type, uint32_t min_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
@@ -399,13 +403,14 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
private:
struct Type {
enum Kind { kFunctionSig, kStructType, kArrayType };
- explicit Type(FunctionSig* signature)
- : kind(kFunctionSig), sig(signature) {}
- explicit Type(StructType* struct_type)
- : kind(kStructType), struct_type(struct_type) {}
- explicit Type(ArrayType* array_type)
- : kind(kArrayType), array_type(array_type) {}
+ explicit Type(FunctionSig* signature, uint32_t supertype)
+ : kind(kFunctionSig), supertype(supertype), sig(signature) {}
+ explicit Type(StructType* struct_type, uint32_t supertype)
+ : kind(kStructType), supertype(supertype), struct_type(struct_type) {}
+ explicit Type(ArrayType* array_type, uint32_t supertype)
+ : kind(kArrayType), supertype(supertype), array_type(array_type) {}
Kind kind;
+ uint32_t supertype;
union {
FunctionSig* sig;
StructType* struct_type;
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 97a31487ea..0035c00bf2 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -113,6 +113,23 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset) {
return func_index;
}
+// TODO(7748): Measure whether this iterative implementation is fast enough.
+// We could cache the result on the module, in yet another vector indexed by
+// type index.
+int GetSubtypingDepth(const WasmModule* module, uint32_t type_index) {
+ uint32_t starting_point = type_index;
+ int depth = 0;
+ while ((type_index = module->supertype(type_index)) != kGenericSuperType) {
+ if (type_index == starting_point) return -1; // Cycle detected.
+ // This is disallowed and will be rejected by validation, but might occur
+ // when this function is called.
+ if (type_index == kNoSuperType) break;
+ depth++;
+ if (depth > static_cast<int>(kV8MaxRttSubtypingDepth)) break;
+ }
+ return depth;
+}
+
void LazilyGeneratedNames::AddForTesting(int function_index,
WireBytesRef name) {
base::MutexGuard lock(&mutex_);
@@ -293,19 +310,23 @@ Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
}
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
- base::Optional<uint32_t> max_size) {
+ base::Optional<uint32_t> max_size,
+ bool shared) {
Factory* factory = isolate->factory();
Handle<JSFunction> object_function = isolate->object_function();
Handle<JSObject> object = factory->NewJSObject(object_function);
Handle<String> minimum_string = factory->InternalizeUtf8String("minimum");
Handle<String> maximum_string = factory->InternalizeUtf8String("maximum");
+ Handle<String> shared_string = factory->InternalizeUtf8String("shared");
JSObject::AddProperty(isolate, object, minimum_string,
factory->NewNumberFromUint(min_size), NONE);
if (max_size.has_value()) {
JSObject::AddProperty(isolate, object, maximum_string,
factory->NewNumberFromUint(max_size.value()), NONE);
}
+ JSObject::AddProperty(isolate, object, shared_string,
+ factory->ToBoolean(shared), NONE);
return object;
}
@@ -401,7 +422,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
maximum_size.emplace(module->maximum_pages);
}
type_value =
- GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+ GetTypeForMemory(isolate, module->initial_pages, maximum_size,
+ module->has_shared_memory);
}
import_kind = memory_string;
break;
@@ -498,7 +520,8 @@ Handle<JSArray> GetExports(Isolate* isolate,
maximum_size.emplace(module->maximum_pages);
}
type_value =
- GetTypeForMemory(isolate, module->initial_pages, maximum_size);
+ GetTypeForMemory(isolate, module->initial_pages, maximum_size,
+ module->has_shared_memory);
}
export_kind = memory_string;
break;
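The extra 'shared' argument to GetTypeForMemory surfaces through the type-reflection API, so reflected memory types should now report whether the backing buffer is shared. A short TypeScript sketch of the expected output — again an illustrative assumption (type reflection enabled; property layout per the js-types proposal):

const sharedMem = new WebAssembly.Memory({ initial: 1, maximum: 4, shared: true });
// type() moved to the prototype in the wasm-js.cc changes above, so it is called
// on the instance; the cast only works around the standard TS lib typings.
console.log((sharedMem as any).type()); // expected: { minimum: 1, maximum: 4, shared: true }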
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index d1f874a908..08a88c4a8e 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -259,6 +259,11 @@ struct V8_EXPORT_PRIVATE WasmDebugSymbols {
struct WasmTable;
+// End of a chain of explicit supertypes.
+constexpr uint32_t kGenericSuperType = 0xFFFFFFFE;
+// Used for types that have no explicit supertype.
+constexpr uint32_t kNoSuperType = 0xFFFFFFFF;
+
// Static representation of a module.
struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<Zone> signature_zone;
@@ -288,6 +293,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
WireBytesRef name = {0, 0};
std::vector<TypeDefinition> types; // by type index
std::vector<uint8_t> type_kinds; // by type index
+ std::vector<uint32_t> supertypes; // by type index
// Map from each type index to the index of its corresponding canonical type.
// Note: right now, only functions are canonicalized, and arrays and structs
// map to themselves.
@@ -295,9 +301,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool has_type(uint32_t index) const { return index < types.size(); }
- void add_signature(const FunctionSig* sig) {
+ void add_signature(const FunctionSig* sig, uint32_t supertype) {
types.push_back(TypeDefinition(sig));
type_kinds.push_back(kWasmFunctionTypeCode);
+ supertypes.push_back(supertype);
uint32_t canonical_id = sig ? signature_map.FindOrInsert(*sig) : 0;
canonicalized_type_ids.push_back(canonical_id);
}
@@ -309,9 +316,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].function_sig;
}
- void add_struct_type(const StructType* type) {
+ void add_struct_type(const StructType* type, uint32_t supertype) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmStructTypeCode);
+ supertypes.push_back(supertype);
// No canonicalization for structs.
canonicalized_type_ids.push_back(0);
}
@@ -323,9 +331,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].struct_type;
}
- void add_array_type(const ArrayType* type) {
+ void add_array_type(const ArrayType* type, uint32_t supertype) {
types.push_back(TypeDefinition(type));
type_kinds.push_back(kWasmArrayTypeCode);
+ supertypes.push_back(supertype);
// No canonicalization for arrays.
canonicalized_type_ids.push_back(0);
}
@@ -337,6 +346,14 @@ struct V8_EXPORT_PRIVATE WasmModule {
return types[index].array_type;
}
+ uint32_t supertype(uint32_t index) const {
+ DCHECK(index < supertypes.size());
+ return supertypes[index];
+ }
+ bool has_supertype(uint32_t index) const {
+ return supertype(index) != kNoSuperType;
+ }
+
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmTable> tables;
@@ -418,6 +435,12 @@ int GetContainingWasmFunction(const WasmModule* module, uint32_t byte_offset);
// contained within a function.
int GetNearestWasmFunction(const WasmModule* module, uint32_t byte_offset);
+// Gets the explicitly defined subtyping depth for the given type.
+// Returns 0 if the type has no explicit supertype.
+// The result is capped to {kV8MaxRttSubtypingDepth + 1}.
+// Invalid cyclic hierarchies will return -1.
+int GetSubtypingDepth(const WasmModule* module, uint32_t type_index);
+
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
@@ -477,7 +500,8 @@ Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig,
Handle<JSObject> GetTypeForGlobal(Isolate* isolate, bool is_mutable,
ValueType type);
Handle<JSObject> GetTypeForMemory(Isolate* isolate, uint32_t min_size,
- base::Optional<uint32_t> max_size);
+ base::Optional<uint32_t> max_size,
+ bool shared);
Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
uint32_t min_size,
base::Optional<uint32_t> max_size);
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index f7e9f2a975..be6d7dd6f7 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -632,6 +632,22 @@ int WasmArray::SizeFor(Map map, int length) {
return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
}
+uint32_t WasmArray::element_offset(uint32_t index) {
+ DCHECK_LE(index, length());
+ return WasmArray::kHeaderSize +
+ index * type()->element_type().element_size_bytes();
+}
+
+Address WasmArray::ElementAddress(uint32_t index) {
+ return ptr() + element_offset(index) - kHeapObjectTag;
+}
+
+ObjectSlot WasmArray::ElementSlot(uint32_t index) {
+ DCHECK_LE(index, length());
+ DCHECK(type()->element_type().is_reference());
+ return RawField(kHeaderSize + kTaggedSize * index);
+}
+
// static
Handle<Object> WasmArray::GetElement(Isolate* isolate, Handle<WasmArray> array,
uint32_t index) {
@@ -639,9 +655,8 @@ Handle<Object> WasmArray::GetElement(Isolate* isolate, Handle<WasmArray> array,
return isolate->factory()->undefined_value();
}
wasm::ValueType element_type = array->type()->element_type();
- uint32_t offset =
- WasmArray::kHeaderSize + index * element_type.element_size_bytes();
- return ReadValueAt(isolate, array, element_type, offset);
+ return ReadValueAt(isolate, array, element_type,
+ array->element_offset(index));
}
// static
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index a52dd7fbc5..8112221c28 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -12,6 +12,7 @@
#include "src/debug/debug-interface.h"
#include "src/logging/counters.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/struct-inl.h"
@@ -251,7 +252,7 @@ base::Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
Handle<WasmTableObject> WasmTableObject::New(
Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::ValueType type,
uint32_t initial, bool has_maximum, uint32_t maximum,
- Handle<FixedArray>* entries) {
+ Handle<FixedArray>* entries, Handle<Object> initial_value) {
// TODO(7748): Make this work with other types when spec clears up.
{
const WasmModule* module =
@@ -260,9 +261,8 @@ Handle<WasmTableObject> WasmTableObject::New(
}
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(initial);
- Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
- backing_store->set(i, null);
+ backing_store->set(i, *initial_value);
}
Handle<Object> max;
@@ -1688,18 +1688,6 @@ wasm::WasmValue WasmArray::GetElement(uint32_t index) {
}
}
-ObjectSlot WasmArray::ElementSlot(uint32_t index) {
- DCHECK_LE(index, length());
- DCHECK(type()->element_type().is_reference());
- return RawField(kHeaderSize + kTaggedSize * index);
-}
-
-Address WasmArray::ElementAddress(uint32_t index) {
- DCHECK_LE(index, length());
- return ptr() + WasmArray::kHeaderSize +
- index * type()->element_type().element_size_bytes() - kHeapObjectTag;
-}
-
// static
Handle<WasmTagObject> WasmTagObject::New(Isolate* isolate,
const wasm::FunctionSig* sig,
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index d34818109b..3c554575f1 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -182,9 +182,6 @@ class WasmModuleObject
class WasmTableObject
: public TorqueGeneratedWasmTableObject<WasmTableObject, JSObject> {
public:
- // Dispatched behavior.
- DECL_PRINTER(WasmTableObject)
-
inline wasm::ValueType type();
V8_EXPORT_PRIVATE static int Grow(Isolate* isolate,
@@ -194,7 +191,8 @@ class WasmTableObject
V8_EXPORT_PRIVATE static Handle<WasmTableObject> New(
Isolate* isolate, Handle<WasmInstanceObject> instance,
wasm::ValueType type, uint32_t initial, bool has_maximum,
- uint32_t maximum, Handle<FixedArray>* entries);
+ uint32_t maximum, Handle<FixedArray>* entries,
+ Handle<Object> initial_value);
V8_EXPORT_PRIVATE static void AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table,
@@ -266,9 +264,6 @@ class WasmMemoryObject
public:
DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
- // Dispatched behavior.
- DECL_PRINTER(WasmMemoryObject)
-
// Add an instance to the internal (weak) list.
V8_EXPORT_PRIVATE static void AddInstance(Isolate* isolate,
Handle<WasmMemoryObject> memory,
@@ -553,9 +548,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
class WasmTagObject
: public TorqueGeneratedWasmTagObject<WasmTagObject, JSObject> {
public:
- // Dispatched behavior.
- DECL_PRINTER(WasmTagObject)
-
// Checks whether the given {sig} has the same parameter types as the
// serialized signature stored within this tag object.
bool MatchesSignature(const wasm::FunctionSig* sig);
@@ -840,8 +832,6 @@ class WasmExceptionTag
V8_EXPORT_PRIVATE static Handle<WasmExceptionTag> New(Isolate* isolate,
int index);
- DECL_PRINTER(WasmExceptionTag)
-
TQ_OBJECT_CONSTRUCTORS(WasmExceptionTag)
};
@@ -935,8 +925,8 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
// Get the {ObjectSlot} corresponding to the element at {index}. Requires that
// this is a reference array.
- ObjectSlot ElementSlot(uint32_t index);
- wasm::WasmValue GetElement(uint32_t index);
+ inline ObjectSlot ElementSlot(uint32_t index);
+ V8_EXPORT_PRIVATE wasm::WasmValue GetElement(uint32_t index);
static inline int SizeFor(Map map, int length);
@@ -945,8 +935,9 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
Handle<WasmArray> array,
uint32_t index);
- // Returns the Address of the element at {index}.
- Address ElementAddress(uint32_t index);
+ // Returns the offset/Address of the element at {index}.
+ inline uint32_t element_offset(uint32_t index);
+ inline Address ElementAddress(uint32_t index);
static int MaxLength(const wasm::ArrayType* type) {
// The total object size must fit into a Smi, for filler objects. To make
diff --git a/deps/v8/src/wasm/wasm-opcodes-inl.h b/deps/v8/src/wasm/wasm-opcodes-inl.h
index 550d7f4671..1034b72d91 100644
--- a/deps/v8/src/wasm/wasm-opcodes-inl.h
+++ b/deps/v8/src/wasm/wasm-opcodes-inl.h
@@ -382,12 +382,16 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// GC operations.
CASE_OP(StructNewWithRtt, "struct.new_with_rtt")
+ CASE_OP(StructNewDefaultWithRtt, "struct.new_default_with_rtt")
+ CASE_OP(StructNew, "struct.new")
CASE_OP(StructNewDefault, "struct.new_default")
CASE_OP(StructGet, "struct.get")
CASE_OP(StructGetS, "struct.get_s")
CASE_OP(StructGetU, "struct.get_u")
CASE_OP(StructSet, "struct.set")
CASE_OP(ArrayNewWithRtt, "array.new_with_rtt")
+ CASE_OP(ArrayNewDefaultWithRtt, "array.new_default_with_rtt")
+ CASE_OP(ArrayNew, "array.new")
CASE_OP(ArrayNewDefault, "array.new_default")
CASE_OP(ArrayGet, "array.get")
CASE_OP(ArrayGetS, "array.get_s")
@@ -396,6 +400,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(ArrayLen, "array.len")
CASE_OP(ArrayCopy, "array.copy")
CASE_OP(ArrayInit, "array.init")
+ CASE_OP(ArrayInitStatic, "array.init_static")
CASE_OP(I31New, "i31.new")
CASE_OP(I31GetS, "i31.get_s")
CASE_OP(I31GetU, "i31.get_u")
@@ -403,9 +408,13 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(RttSub, "rtt.sub")
CASE_OP(RttFreshSub, "rtt.fresh_sub")
CASE_OP(RefTest, "ref.test")
+ CASE_OP(RefTestStatic, "ref.test_static")
CASE_OP(RefCast, "ref.cast")
+ CASE_OP(RefCastStatic, "ref.cast_static")
CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(BrOnCastStatic, "br_on_cast_static")
CASE_OP(BrOnCastFail, "br_on_cast_fail")
+ CASE_OP(BrOnCastStaticFail, "br_on_cast_static_fail")
CASE_OP(RefIsFunc, "ref.is_func")
CASE_OP(RefIsData, "ref.is_data")
CASE_OP(RefIsI31, "ref.is_i31")
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 50e813ad02..d920b7660b 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -650,13 +650,15 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
#define FOREACH_GC_OPCODE(V) \
V(StructNewWithRtt, 0xfb01, _) \
- V(StructNewDefault, 0xfb02, _) \
+ V(StructNewDefaultWithRtt, 0xfb02, _) \
V(StructGet, 0xfb03, _) \
V(StructGetS, 0xfb04, _) \
V(StructGetU, 0xfb05, _) \
V(StructSet, 0xfb06, _) \
+ V(StructNew, 0xfb07, _) \
+ V(StructNewDefault, 0xfb08, _) \
V(ArrayNewWithRtt, 0xfb11, _) \
- V(ArrayNewDefault, 0xfb12, _) \
+ V(ArrayNewDefaultWithRtt, 0xfb12, _) \
V(ArrayGet, 0xfb13, _) \
V(ArrayGetS, 0xfb14, _) \
V(ArrayGetU, 0xfb15, _) \
@@ -664,6 +666,9 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(ArrayLen, 0xfb17, _) \
V(ArrayCopy, 0xfb18, _) /* not standardized - V8 experimental */ \
V(ArrayInit, 0xfb19, _) /* not standardized - V8 experimental */ \
+ V(ArrayInitStatic, 0xfb1a, _) \
+ V(ArrayNew, 0xfb1b, _) \
+ V(ArrayNewDefault, 0xfb1c, _) \
V(I31New, 0xfb20, _) \
V(I31GetS, 0xfb21, _) \
V(I31GetU, 0xfb22, _) \
@@ -674,6 +679,10 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(RefCast, 0xfb41, _) \
V(BrOnCast, 0xfb42, _) \
V(BrOnCastFail, 0xfb43, _) \
+ V(RefTestStatic, 0xfb44, _) \
+ V(RefCastStatic, 0xfb45, _) \
+ V(BrOnCastStatic, 0xfb46, _) \
+ V(BrOnCastStaticFail, 0xfb47, _) \
V(RefIsFunc, 0xfb50, _) \
V(RefIsData, 0xfb51, _) \
V(RefIsI31, 0xfb52, _) \
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index d3165582c8..b0d697924e 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -303,7 +303,7 @@ NativeModuleSerializer::NativeModuleSerializer(
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
if (code == nullptr) return sizeof(bool);
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
if (code->tier() != ExecutionTier::kTurbofan) {
return sizeof(bool);
}
@@ -334,7 +334,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(false);
return true;
}
- DCHECK_EQ(WasmCode::kFunction, code->kind());
+ DCHECK_EQ(WasmCode::kWasmFunction, code->kind());
// Only serialize TurboFan code, as Liftoff code can contain breakpoints or
// non-relocatable constants.
if (code->tier() != ExecutionTier::kTurbofan) {
diff --git a/deps/v8/src/wasm/wasm-subtyping.cc b/deps/v8/src/wasm/wasm-subtyping.cc
index d2b7e9fe31..83b1bbe462 100644
--- a/deps/v8/src/wasm/wasm-subtyping.cc
+++ b/deps/v8/src/wasm/wasm-subtyping.cc
@@ -223,6 +223,8 @@ V8_INLINE bool EquivalentIndices(uint32_t index1, uint32_t index2,
}
}
+} // namespace
+
bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -234,8 +236,10 @@ bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
for (uint32_t i = 0; i < super_struct->field_count(); i++) {
bool sub_mut = sub_struct->mutability(i);
bool super_mut = super_struct->mutability(i);
@@ -261,8 +265,10 @@ bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
super_module->types[supertype_index].array_type;
bool sub_mut = sub_array->mutability();
bool super_mut = super_array->mutability();
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
if (sub_mut != super_mut ||
(sub_mut &&
!EquivalentTypes(sub_array->element_type(), super_array->element_type(),
@@ -294,8 +300,10 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return false;
}
- TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
- sub_module, super_module);
+ if (!sub_module->has_supertype(subtype_index)) {
+ TypeJudgementCache::instance()->cache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ }
for (uint32_t i = 0; i < sub_func->parameter_count(); i++) {
// Contravariance for params.
@@ -318,7 +326,6 @@ bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
return true;
}
-} // namespace
V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
ValueType subtype, ValueType supertype, const WasmModule* sub_module,
@@ -410,11 +417,35 @@ V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
DCHECK(super_heap.is_index());
uint32_t super_index = super_heap.ref_index();
DCHECK(super_module->has_type(super_index));
+ // The {IsSubtypeOf} entry point already has a fast path checking ValueType
+ // equality; here we catch (ref $x) being a subtype of (ref null $x).
+ if (sub_module == super_module && sub_index == super_index) return true;
uint8_t sub_kind = sub_module->type_kinds[sub_index];
if (sub_kind != super_module->type_kinds[super_index]) return false;
+ // Types with explicit supertypes just check those.
+ if (sub_module->has_supertype(sub_index)) {
+ // TODO(7748): Figure out cross-module story.
+ if (sub_module != super_module) return false;
+
+ uint32_t explicit_super = sub_module->supertype(sub_index);
+ while (true) {
+ if (explicit_super == super_index) return true;
+ // Reached the end of the explicitly defined inheritance chain.
+ if (explicit_super == kGenericSuperType) return false;
+ // Types without explicit supertype can't occur here, they would have
+ // failed validation.
+ DCHECK_NE(explicit_super, kNoSuperType);
+ explicit_super = sub_module->supertype(explicit_super);
+ }
+ } else {
+ // A structural type (without explicit supertype) is never a subtype of
+ // a nominal type (with explicit supertype).
+ if (super_module->has_supertype(super_index)) return false;
+ }
+
// Accessing the caches for subtyping and equivalence from multiple background
// threads is protected by a lock.
base::RecursiveMutexGuard type_cache_access(
diff --git a/deps/v8/src/wasm/wasm-subtyping.h b/deps/v8/src/wasm/wasm-subtyping.h
index 59e7935d1f..53232ca2c2 100644
--- a/deps/v8/src/wasm/wasm-subtyping.h
+++ b/deps/v8/src/wasm/wasm-subtyping.h
@@ -97,6 +97,20 @@ V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
// case another WasmModule gets allocated in the same address later.
void DeleteCachedTypeJudgementsForModule(const WasmModule* module);
+// Checks whether {subtype_index} is a legal subtype of {supertype_index}.
+// These are the same checks that {IsSubtypeOf} uses for comparing types without
+// explicitly given supertypes; for validating such explicit supertypes they
+// can be called directly.
+bool StructIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* sub_module,
+ const WasmModule* super_module);
+
} // namespace wasm
} // namespace internal
} // namespace v8
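With StructIsSubtypeOf, ArrayIsSubtypeOf and FunctionIsSubtypeOf now exported from the header, a caller that knows the kind of a type can validate an explicitly declared supertype directly, without building ValueTypes and going through IsSubtypeOf. A hypothetical fragment, assuming the kWasm*TypeCode constants from wasm-constants.h and the type_kinds table already used in the .cc hunk above; the dispatch shape is illustrative, not part of this patch:

bool ValidateDeclaredSupertype(uint32_t type_index, uint32_t declared_super,
                               const WasmModule* module) {
  switch (module->type_kinds[type_index]) {
    case kWasmStructTypeCode:
      return StructIsSubtypeOf(type_index, declared_super, module, module);
    case kWasmArrayTypeCode:
      return ArrayIsSubtypeOf(type_index, declared_super, module, module);
    case kWasmFunctionTypeCode:
      return FunctionIsSubtypeOf(type_index, declared_super, module, module);
    default:
      return false;
  }
}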
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
index 3e2aa43067..06a09ad6a4 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -392,7 +392,7 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
PropertyDetails details =
map->instance_descriptors(kRelaxedLoad).GetDetails(i);
- if (details.location() != kField) {
+ if (details.location() != PropertyLocation::kField) {
Throw("Web snapshot: Properties which are not fields not supported");
return;
}
@@ -694,19 +694,16 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
serializer.WriteUint32(ValueType::DOUBLE);
serializer.WriteDouble(HeapNumber::cast(*object).value());
break;
- case JS_FUNCTION_TYPE: {
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- FunctionKind kind = function->shared().kind();
- if (IsClassConstructor(kind)) {
- SerializeClass(function, id);
- serializer.WriteUint32(ValueType::CLASS_ID);
- } else {
- SerializeFunction(function, id);
- serializer.WriteUint32(ValueType::FUNCTION_ID);
- }
+ case JS_FUNCTION_TYPE:
+ SerializeFunction(Handle<JSFunction>::cast(object), id);
+ serializer.WriteUint32(ValueType::FUNCTION_ID);
+ serializer.WriteUint32(id);
+ break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
+ SerializeClass(Handle<JSFunction>::cast(object), id);
+ serializer.WriteUint32(ValueType::CLASS_ID);
serializer.WriteUint32(id);
break;
- }
case JS_OBJECT_TYPE:
SerializeObject(Handle<JSObject>::cast(object), id);
serializer.WriteUint32(ValueType::OBJECT_ID);
@@ -724,9 +721,9 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
return;
}
uint32_t pattern_id, flags_id;
- Handle<String> pattern = handle(regexp->Pattern(), isolate_);
+ Handle<String> pattern = handle(regexp->source(), isolate_);
Handle<String> flags_string =
- JSRegExp::StringFromFlags(isolate_, regexp->GetFlags());
+ JSRegExp::StringFromFlags(isolate_, regexp->flags());
SerializeString(pattern, pattern_id);
SerializeString(flags_string, flags_id);
serializer.WriteUint32(ValueType::REGEXP);
@@ -1285,7 +1282,7 @@ void WebSnapshotDeserializer::DeserializeObjects() {
ReadValue(value, wanted_representation, property_array, i);
// Read the representation from the map.
PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
- CHECK_EQ(details.location(), kField);
+ CHECK_EQ(details.location(), PropertyLocation::kField);
CHECK_EQ(kData, details.kind());
Representation r = details.representation();
if (r.IsNone()) {
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index f6f72d4616..40b4756bd1 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -54,7 +54,8 @@ std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
auto allocator = std::make_unique<v8::base::BoundedPageAllocator>(
platform_allocator, reservation_start, ZoneCompression::kReservationSize,
- kZonePageSize);
+ kZonePageSize,
+ base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
// Exclude first page from allocation to ensure that accesses through
// decompressed null pointer will seg-fault.
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 57f198e9aa..42617aadb8 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -89,14 +89,8 @@ void Zone::DeleteAll() {
// Traverse the chained list of segments and return them all to the allocator.
while (current) {
Segment* next = current->next();
- size_t size = current->total_size();
-
- // Un-poison the segment content so we can re-use or zap it later.
- ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(current->start()),
- current->capacity());
-
- segment_bytes_allocated_ -= size;
- allocator_->ReturnSegment(current, supports_compression());
+ segment_bytes_allocated_ -= current->total_size();
+ ReleaseSegment(current);
current = next;
}
@@ -107,6 +101,13 @@ void Zone::DeleteAll() {
#endif
}
+void Zone::ReleaseSegment(Segment* segment) {
+ // Un-poison the segment content so we can re-use or zap it later.
+ ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
+ segment->capacity());
+ allocator_->ReturnSegment(segment, supports_compression());
+}
+
Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
@@ -168,5 +169,48 @@ Address Zone::NewExpand(size_t size) {
return result;
}
+ZoneScope::ZoneScope(Zone* zone)
+ : zone_(zone),
+#ifdef V8_ENABLE_PRECISE_ZONE_STATS
+ allocation_size_for_tracing_(zone->allocation_size_for_tracing_),
+ freed_size_for_tracing_(zone->freed_size_for_tracing_),
+#endif
+ allocation_size_(zone->allocation_size_),
+ segment_bytes_allocated_(zone->segment_bytes_allocated_),
+ position_(zone->position_),
+ limit_(zone->limit_),
+ segment_head_(zone->segment_head_) {
+}
+
+ZoneScope::~ZoneScope() {
+ // Release segments up to the stored segment_head_.
+ Segment* current = zone_->segment_head_;
+ while (current != segment_head_) {
+ Segment* next = current->next();
+ zone_->ReleaseSegment(current);
+ current = next;
+ }
+
+ // Un-poison the trailing segment content so we can re-use or zap it later.
+ if (segment_head_ != nullptr) {
+ void* const start = reinterpret_cast<void*>(position_);
+ DCHECK_GE(start, reinterpret_cast<void*>(current->start()));
+ DCHECK_LE(start, reinterpret_cast<void*>(current->end()));
+ const size_t length = current->end() - reinterpret_cast<Address>(start);
+ ASAN_UNPOISON_MEMORY_REGION(start, length);
+ }
+
+ // Reset the Zone to the stored state.
+ zone_->allocation_size_ = allocation_size_;
+ zone_->segment_bytes_allocated_ = segment_bytes_allocated_;
+ zone_->position_ = position_;
+ zone_->limit_ = limit_;
+ zone_->segment_head_ = segment_head_;
+#ifdef V8_ENABLE_PRECISE_ZONE_STATS
+ zone_->allocation_size_for_tracing_ = allocation_size_for_tracing_;
+ zone_->freed_size_for_tracing_ = freed_size_for_tracing_;
+#endif
+}
+
} // namespace internal
} // namespace v8
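ZoneScope behaves like a zone-level HandleScope: the constructor snapshots the zone's position, limit, segment head and byte counters, and the destructor releases every segment chained in after the snapshot before restoring those fields. A hedged usage sketch, assuming only the Zone/AccountingAllocator API that already exists in the tree:

// Sketch: allocations made inside the scope do not survive it.
AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
int* outer = zone.New<int>(1);  // allocated before the scope
{
  ZoneScope zone_scope(&zone);
  zone.New<int>(2);  // memory (and any fresh segment) is reclaimed at '}'
}
// {outer} is still valid; position/limit and the byte counters are back to
// their pre-scope values.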
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index aa76091621..ef2f0b3dc8 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -189,6 +189,10 @@ class V8_EXPORT_PRIVATE Zone final {
// Deletes all objects and free all memory allocated in the Zone.
void DeleteAll();
+ // Releases the given segment back to the allocator without performing any
+ // local bookkeeping (e.g. tracking allocated bytes, maintaining linked lists, etc).
+ void ReleaseSegment(Segment* segment);
+
// All pointers returned from New() are 8-byte aligned.
static const size_t kAlignmentInBytes = 8;
@@ -235,6 +239,30 @@ class V8_EXPORT_PRIVATE Zone final {
// The number of bytes freed in this zone so far.
size_t freed_size_for_tracing_ = 0;
#endif
+
+ friend class ZoneScope;
+};
+
+// Similar to the HandleScope, the ZoneScope defines a region of validity for
+// zone memory. All memory allocated in the given Zone during the scope's
+// lifetime is freed when the scope is destructed, i.e. the Zone is reset to
+// the state it was in when the scope was created.
+class ZoneScope final {
+ public:
+ explicit ZoneScope(Zone* zone);
+ ~ZoneScope();
+
+ private:
+ Zone* const zone_;
+#ifdef V8_ENABLE_PRECISE_ZONE_STATS
+ const size_t allocation_size_for_tracing_;
+ const size_t freed_size_for_tracing_;
+#endif
+ const size_t allocation_size_;
+ const size_t segment_bytes_allocated_;
+ const Address position_;
+ const Address limit_;
+ Segment* const segment_head_;
};
// ZoneObject is an abstraction that helps define classes of objects
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 7f04173489..1a8de4c040 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -122,7 +122,7 @@ void CcTest::Run() {
DCHECK_EQ(active_isolates, i::Isolate::non_disposed_isolates());
#endif // DEBUG
if (initialize_) {
- if (v8::Locker::IsActive()) {
+ if (v8::Locker::WasEverUsed()) {
v8::Locker locker(isolate_);
EmptyMessageQueues(isolate_);
} else {
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 9c28520ed5..a52dc4324a 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -136,9 +136,6 @@
'test-strings/Traverse': [PASS, HEAVY],
'test-swiss-name-dictionary-csa/DeleteAtBoundaries': [PASS, HEAVY],
'test-swiss-name-dictionary-csa/SameH2': [PASS, HEAVY],
-
- # TODO(v8:11382): Reenable once irregexp is reentrant.
- 'test-regexp/RegExpInterruptReentrantExecution': [FAIL],
}], # ALWAYS
##############################################################################
@@ -385,6 +382,10 @@
'test-serialize/StartupSerializerTwice': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
+
+ # The uint32 values are sign-extended on MIPS64.
+ 'test-run-load-store/RunLoadStoreZeroExtend64': [SKIP],
+ 'test-run-load-store/RunUnalignedLoadStoreZeroExtend64': [SKIP],
}], # 'arch == mips64el or arch == mips64'
##############################################################################
@@ -427,6 +428,7 @@
'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': [SKIP],
# SIMD not fully implemented yet.
+ 'test-run-wasm-relaxed-simd/*': [SKIP],
'test-run-wasm-simd/RunWasm_F64x2ExtractLaneWithI64x2_liftoff': [SKIP],
'test-run-wasm-simd/RunWasm_I64x2ExtractWithF64x2_liftoff': [SKIP],
'test-run-wasm-simd-liftoff/*': [SKIP],
@@ -440,9 +442,14 @@
'test-run-wasm-64/*': [SKIP],
'test-run-wasm/RunWasmTurbofan_Select_s128_parameters': [SKIP],
'test-run-wasm/RunWasmLiftoff_Select_s128_parameters': [SKIP],
- 'test-liftoff-for-fuzzing/NondeterminismUnopF64x2': [SKIP],
- 'test-liftoff-for-fuzzing/NondeterminismUnopF32x2': [SKIP],
- 'test-liftoff-for-fuzzing/NondeterminismUnopF32x4': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF64x2AllNaN': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF64x2OneNaN': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF32x4AllNaN': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF32x4OneNaN': [SKIP],
+
+ # The uint32 values are sign-extended on riscv64.
+ 'test-run-load-store/RunUnalignedLoadStoreZeroExtend64': [SKIP],
+ 'test-run-load-store/RunLoadStoreZeroExtend64': [SKIP],
}], # 'arch == riscv64'
##############################################################################
@@ -509,9 +516,10 @@
'test-wasm-breakpoints/*' : [SKIP],
- # SIMD not fully implemented yet
+ # SIMD / Liftoff not fully implemented yet
'test-run-wasm-simd-liftoff/*': [SKIP],
'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
+ 'test-gc/RunWasmLiftoff_RefTrivialCastsStatic': [SKIP],
# TODO(11856): Port nondeterminism detection.
'test-liftoff-for-fuzzing/*': [SKIP],
@@ -526,6 +534,8 @@
'test-gc/RunWasmLiftoff*': [SKIP],
'test-run-wasm-module/Run_WasmModule_CompilationHintsNoTiering': [SKIP],
'test-wasm-serialization/TierDownAfterDeserialization': [SKIP],
+ 'test-streaming-compilation/SingleThreadedTestIncrementalCaching': [SKIP],
+ 'test-streaming-compilation/AsyncTestIncrementalCaching': [SKIP],
}],
##############################################################################
@@ -756,14 +766,19 @@
##############################################################################
['no_simd_hardware == True', {
- 'test-run-wasm-simd/*': [SKIP],
+ 'test-run-wasm-relaxed-simd/*': [SKIP],
'test-run-wasm-simd-liftoff/*': [SKIP],
+ 'test-run-wasm-simd/*': [SKIP],
'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
+ 'test-gc/RunWasmLiftoff_RefTrivialCastsStatic': [SKIP],
+ 'test-gc/RunWasmTurbofan_RefTrivialCastsStatic': [SKIP],
'test-run-wasm/RunWasmLiftoff_Select_s128_parameters': [SKIP],
'test-run-wasm/RunWasmTurbofan_Select_s128_parameters': [SKIP],
- 'test-liftoff-for-fuzzing/NondeterminismUnopF32x4': [SKIP],
- 'test-liftoff-for-fuzzing/NondeterminismUnopF64x2': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF32x4AllNaN': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF32x4OneNaN': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF64x2AllNaN': [SKIP],
+ 'test-liftoff-for-fuzzing/NondeterminismUnopF64x2OneNaN': [SKIP],
}], # no_simd_hardware == True
################################################################################
@@ -1227,4 +1242,13 @@
'test-calls-with-arraylike-or-spread/*': [SKIP],
}],
+################################################################################
+['variant == stress', {
+ # The 'stress' variant sets the '--stress-opt' d8 flag, which executes 2 runs
+ # in debug mode and 5 runs in release mode. Hence the module will be cached
+ # between runs, and the correct caching behavior can no longer be observed in
+ # the later runs.
+ 'test-streaming-compilation/AsyncTestIncrementalCaching': [SKIP],
+ 'test-streaming-compilation/SingleThreadedTestIncrementalCaching': [SKIP],
+}],
]
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 63df42cfb2..5cbdcd1b3b 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -141,9 +141,9 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
p, p.GetIsolate()); // allocated in outer handle scope.
}
-Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
+Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> f) {
Zone zone(isolate->allocator(), ZONE_NAME);
- return Optimize(function, &zone, isolate, flags_);
+ return Optimize(f, &zone, isolate, flags_);
}
// Compile the given machine graph instead of the source of the function
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index 25914222de..7530d010d8 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -471,9 +471,9 @@ TEST(BranchCombineInt32AddLessThanZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Int32LessThan(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -498,9 +498,9 @@ TEST(BranchCombineInt32AddGreaterThanOrEqualZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Int32GreaterThanOrEqual(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -525,9 +525,9 @@ TEST(BranchCombineInt32ZeroGreaterThanAdd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Int32GreaterThan(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
@@ -552,9 +552,9 @@ TEST(BranchCombineInt32ZeroLessThanOrEqualAdd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Int32LessThanOrEqual(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
@@ -579,9 +579,9 @@ TEST(BranchCombineUint32AddLessThanOrEqualZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Uint32LessThanOrEqual(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -605,9 +605,9 @@ TEST(BranchCombineUint32AddGreaterThanZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Uint32GreaterThan(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -631,9 +631,9 @@ TEST(BranchCombineUint32ZeroGreaterThanOrEqualAdd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Uint32GreaterThanOrEqual(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
@@ -657,9 +657,9 @@ TEST(BranchCombineUint32ZeroLessThanAdd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Int32Add(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Int32Add(p0, p1);
Node* compare = m.Uint32LessThan(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
@@ -683,9 +683,9 @@ TEST(BranchCombineWord32AndLessThanZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Int32LessThan(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -709,9 +709,9 @@ TEST(BranchCombineWord32AndGreaterThanOrEqualZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Int32GreaterThanOrEqual(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -735,9 +735,9 @@ TEST(BranchCombineInt32ZeroGreaterThanAnd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Int32GreaterThan(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
@@ -761,9 +761,9 @@ TEST(BranchCombineInt32ZeroLessThanOrEqualAnd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
MachineType::Int32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Int32LessThanOrEqual(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
@@ -787,9 +787,9 @@ TEST(BranchCombineUint32AndLessThanOrEqualZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Uint32LessThanOrEqual(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -813,9 +813,9 @@ TEST(BranchCombineUint32AndGreaterThanZero) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Uint32GreaterThan(add, m.Int32Constant(0));
RawMachineLabel blocka, blockb;
@@ -839,9 +839,9 @@ TEST(BranchCombineUint32ZeroGreaterThanOrEqualAnd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Uint32GreaterThanOrEqual(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
@@ -865,9 +865,9 @@ TEST(BranchCombineUint32ZeroLessThanAnd) {
RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
- Node* a = m.Parameter(0);
- Node* b = m.Parameter(1);
- Node* add = m.Word32And(a, b);
+ Node* p0 = m.Parameter(0);
+ Node* p1 = m.Parameter(1);
+ Node* add = m.Word32And(p0, p1);
Node* compare = m.Uint32LessThan(m.Int32Constant(0), add);
RawMachineLabel blocka, blockb;
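The churn in this and the following compiler tests is mostly mechanical: inner RawMachineAssemblerTester instances (and a few scopes, zones and loop variables) are renamed so they no longer shadow an identically named object in the enclosing scope. An illustrative before/after of the pattern, not taken verbatim from any one test:

// The outer tester only supplies machine operators for the loop; the inner
// tester used to be called "m" as well, shadowing it, and is now "t".
RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
                                     MachineType::Int32());
const Operator* kOps[] = {m.machine()->Int32Add(), m.machine()->Word32And()};
for (size_t n = 0; n < arraysize(kOps); n++) {
  RawMachineAssemblerTester<int32_t> t(MachineType::Int32(),
                                       MachineType::Int32());
  t.Return(t.AddNode(kOps[n], t.Parameter(0), t.Parameter(1)));
  t.GenerateCode();
}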
diff --git a/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc b/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
index b885fc0392..b5a82bbf66 100644
--- a/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
+++ b/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
@@ -18,7 +18,7 @@ void CompileRunWithNodeObserver(const std::string& js_code,
IrOpcode::Value updated_call_opcode2) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
+ v8::HandleScope handle_scope(isolate);
FLAG_allow_natives_syntax = true;
FLAG_turbo_optimize_apply = true;
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index e032654183..faa0367d71 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -235,12 +235,12 @@ class ParallelMoveCreator : public HandleAndZoneScope {
// destinations set with the float equivalents of the operand and check
// that all destinations are unique and do not alias each other.
if (!kSimpleFPAliasing && mo.destination().IsFPLocationOperand()) {
- std::vector<InstructionOperand> fragments;
- GetCanonicalOperands(dst, &fragments);
- CHECK(!fragments.empty());
- for (size_t i = 0; i < fragments.size(); ++i) {
- if (destinations.find(fragments[i]) == destinations.end()) {
- destinations.insert(fragments[i]);
+ std::vector<InstructionOperand> dst_fragments;
+ GetCanonicalOperands(dst, &dst_fragments);
+ CHECK(!dst_fragments.empty());
+ for (size_t j = 0; j < dst_fragments.size(); ++j) {
+ if (destinations.find(dst_fragments[j]) == destinations.end()) {
+ destinations.insert(dst_fragments[j]);
} else {
reject = true;
break;
@@ -250,18 +250,18 @@ class ParallelMoveCreator : public HandleAndZoneScope {
// representations.
const InstructionOperand& src = mo.source();
if (src.IsFPRegister()) {
- std::vector<InstructionOperand> fragments;
+ std::vector<InstructionOperand> src_fragments;
MachineRepresentation src_rep =
LocationOperand::cast(src).representation();
- GetCanonicalOperands(src, &fragments);
- CHECK(!fragments.empty());
- for (size_t i = 0; i < fragments.size(); ++i) {
- auto find_it = sources.find(fragments[i]);
+ GetCanonicalOperands(src, &src_fragments);
+ CHECK(!src_fragments.empty());
+ for (size_t j = 0; j < src_fragments.size(); ++j) {
+ auto find_it = sources.find(src_fragments[j]);
if (find_it != sources.end() && find_it->second != src_rep) {
reject = true;
break;
}
- sources.insert(std::make_pair(fragments[i], src_rep));
+ sources.insert(std::make_pair(src_fragments[j], src_rep));
}
}
} else {
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index b911164f3f..3f3f406b0f 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -765,8 +765,8 @@ TEST(RemoveToNumberEffects) {
if (effect_use != nullptr) {
R.CheckEffectInput(R.start(), effect_use);
// Check that value uses of ToNumber() do not go to start().
- for (int i = 0; i < effect_use->op()->ValueInputCount(); i++) {
- CHECK_NE(R.start(), effect_use->InputAt(i));
+ for (int j = 0; j < effect_use->op()->ValueInputCount(); j++) {
+ CHECK_NE(R.start(), effect_use->InputAt(j));
}
}
}
@@ -843,8 +843,8 @@ void CheckEqualityReduction(JSTypedLoweringTester* R, bool strict, Node* l,
const Operator* op = strict ? R->javascript.StrictEqual(feedback_source)
: R->javascript.Equal(feedback_source);
Node* eq = R->Binop(op, p0, p1);
- Node* r = R->reduce(eq);
- R->CheckBinop(expected, r);
+ Node* reduced = R->reduce(eq);
+ R->CheckBinop(expected, reduced);
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index a6df09ead3..fe5009f231 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -119,7 +119,6 @@ class LoopFinderTester : HandleAndZoneScope {
}
Node* Return(Node* val, Node* effect, Node* control) {
- Node* zero = graph.NewNode(common.Int32Constant(0));
Node* ret = graph.NewNode(common.Return(), zero, val, effect, control);
end->ReplaceInput(0, ret);
return ret;
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index d975cb31f2..956f4a4e31 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -680,12 +680,12 @@ TEST(CodeGenInt32Binop) {
for (size_t i = 0; i < arraysize(kOps); ++i) {
for (int j = 0; j < 8; j++) {
for (int k = 0; k < 8; k++) {
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ RawMachineAssemblerTester<int32_t> t(MachineType::Int32(),
MachineType::Int32());
- Node* a = Int32Input(&m, j);
- Node* b = Int32Input(&m, k);
- m.Return(m.AddNode(kOps[i], a, b));
- m.GenerateCode();
+ Node* a = Int32Input(&t, j);
+ Node* b = Int32Input(&t, k);
+ t.Return(t.AddNode(kOps[i], a, b));
+ t.GenerateCode();
}
}
}
@@ -741,12 +741,12 @@ TEST(CodeGenInt64Binop) {
for (size_t i = 0; i < arraysize(kOps); ++i) {
for (int j = 0; j < 8; j++) {
for (int k = 0; k < 8; k++) {
- RawMachineAssemblerTester<int64_t> m(MachineType::Int64(),
+ RawMachineAssemblerTester<int64_t> t(MachineType::Int64(),
MachineType::Int64());
- Node* a = Int64Input(&m, j);
- Node* b = Int64Input(&m, k);
- m.Return(m.AddNode(kOps[i], a, b));
- m.GenerateCode();
+ Node* a = Int64Input(&t, j);
+ Node* b = Int64Input(&t, k);
+ t.Return(t.AddNode(kOps[i], a, b));
+ t.GenerateCode();
}
}
}
@@ -1814,18 +1814,18 @@ TEST(RunInt32AddInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(
+ RawMachineAssemblerTester<int32_t> t(
MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Int32Add(m.Parameter(0),
- m.AddNode(shops[n], m.Parameter(1),
- m.Parameter(2))),
- m.Int32Constant(0)),
+ t.Branch(t.Word32Equal(t.Int32Add(t.Parameter(0),
+ t.AddNode(shops[n], t.Parameter(1),
+ t.Parameter(2))),
+ t.Int32Constant(0)),
&blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(constant));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0 - constant));
+ t.Bind(&blocka);
+ t.Return(t.Int32Constant(constant));
+ t.Bind(&blockb);
+ t.Return(t.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
@@ -1844,7 +1844,7 @@ TEST(RunInt32AddInBranch) {
break;
}
int32_t expected = ((i + right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(i, j, shift));
+ CHECK_EQ(expected, t.Call(i, j, shift));
}
}
}
@@ -1906,12 +1906,12 @@ TEST(RunInt32AddInComparison) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(
+ RawMachineAssemblerTester<int32_t> t(
MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
- m.Return(m.Word32Equal(
- m.Int32Add(m.Parameter(0),
- m.AddNode(shops[n], m.Parameter(1), m.Parameter(2))),
- m.Int32Constant(0)));
+ t.Return(t.Word32Equal(
+ t.Int32Add(t.Parameter(0),
+ t.AddNode(shops[n], t.Parameter(1), t.Parameter(2))),
+ t.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
@@ -1930,7 +1930,7 @@ TEST(RunInt32AddInComparison) {
break;
}
int32_t expected = (i + right) == 0;
- CHECK_EQ(expected, m.Call(i, j, shift));
+ CHECK_EQ(expected, t.Call(i, j, shift));
}
}
}
@@ -2159,18 +2159,18 @@ TEST(RunInt32SubInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(
+ RawMachineAssemblerTester<int32_t> t(
MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Int32Sub(m.Parameter(0),
- m.AddNode(shops[n], m.Parameter(1),
- m.Parameter(2))),
- m.Int32Constant(0)),
+ t.Branch(t.Word32Equal(t.Int32Sub(t.Parameter(0),
+ t.AddNode(shops[n], t.Parameter(1),
+ t.Parameter(2))),
+ t.Int32Constant(0)),
&blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(constant));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0 - constant));
+ t.Bind(&blocka);
+ t.Return(t.Int32Constant(constant));
+ t.Bind(&blockb);
+ t.Return(t.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
@@ -2189,7 +2189,7 @@ TEST(RunInt32SubInBranch) {
break;
}
int32_t expected = ((i - right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(i, j, shift));
+ CHECK_EQ(expected, t.Call(i, j, shift));
}
}
}
@@ -2251,12 +2251,12 @@ TEST(RunInt32SubInComparison) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(
+ RawMachineAssemblerTester<int32_t> t(
MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
- m.Return(m.Word32Equal(
- m.Int32Sub(m.Parameter(0),
- m.AddNode(shops[n], m.Parameter(1), m.Parameter(2))),
- m.Int32Constant(0)));
+ t.Return(t.Word32Equal(
+ t.Int32Sub(t.Parameter(0),
+ t.AddNode(shops[n], t.Parameter(1), t.Parameter(2))),
+ t.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
@@ -2275,7 +2275,7 @@ TEST(RunInt32SubInComparison) {
break;
}
int32_t expected = (i - right) == 0;
- CHECK_EQ(expected, m.Call(i, j, shift));
+ CHECK_EQ(expected, t.Call(i, j, shift));
}
}
}
@@ -2835,18 +2835,18 @@ TEST(RunWord32AndInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(
+ RawMachineAssemblerTester<int32_t> t(
MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Word32And(m.Parameter(0),
- m.AddNode(shops[n], m.Parameter(1),
- m.Parameter(2))),
- m.Int32Constant(0)),
+ t.Branch(t.Word32Equal(t.Word32And(t.Parameter(0),
+ t.AddNode(shops[n], t.Parameter(1),
+ t.Parameter(2))),
+ t.Int32Constant(0)),
&blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(constant));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0 - constant));
+ t.Bind(&blocka);
+ t.Return(t.Int32Constant(constant));
+ t.Bind(&blockb);
+ t.Return(t.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
@@ -2865,7 +2865,7 @@ TEST(RunWord32AndInBranch) {
break;
}
int32_t expected = ((i & right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(i, j, shift));
+ CHECK_EQ(expected, t.Call(i, j, shift));
}
}
}
@@ -3064,18 +3064,18 @@ TEST(RunWord32OrInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(
+ RawMachineAssemblerTester<int32_t> t(
MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Word32Or(m.Parameter(0),
- m.AddNode(shops[n], m.Parameter(1),
- m.Parameter(2))),
- m.Int32Constant(0)),
+ t.Branch(t.Word32Equal(t.Word32Or(t.Parameter(0),
+ t.AddNode(shops[n], t.Parameter(1),
+ t.Parameter(2))),
+ t.Int32Constant(0)),
&blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(constant));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0 - constant));
+ t.Bind(&blocka);
+ t.Return(t.Int32Constant(constant));
+ t.Bind(&blockb);
+ t.Return(t.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
@@ -3094,7 +3094,7 @@ TEST(RunWord32OrInBranch) {
break;
}
int32_t expected = ((i | right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(i, j, shift));
+ CHECK_EQ(expected, t.Call(i, j, shift));
}
}
}
@@ -3289,18 +3289,18 @@ TEST(RunWord32XorInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(
+ RawMachineAssemblerTester<int32_t> t(
MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
RawMachineLabel blocka, blockb;
- m.Branch(m.Word32Equal(m.Word32Xor(m.Parameter(0),
- m.AddNode(shops[n], m.Parameter(1),
- m.Parameter(2))),
- m.Int32Constant(0)),
+ t.Branch(t.Word32Equal(t.Word32Xor(t.Parameter(0),
+ t.AddNode(shops[n], t.Parameter(1),
+ t.Parameter(2))),
+ t.Int32Constant(0)),
&blocka, &blockb);
- m.Bind(&blocka);
- m.Return(m.Int32Constant(constant));
- m.Bind(&blockb);
- m.Return(m.Int32Constant(0 - constant));
+ t.Bind(&blocka);
+ t.Return(t.Int32Constant(constant));
+ t.Bind(&blockb);
+ t.Return(t.Int32Constant(0 - constant));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
FOR_UINT32_SHIFTS(shift) {
@@ -3319,7 +3319,7 @@ TEST(RunWord32XorInBranch) {
break;
}
int32_t expected = ((i ^ right) == 0) ? constant : 0 - constant;
- CHECK_EQ(expected, m.Call(i, j, shift));
+ CHECK_EQ(expected, t.Call(i, j, shift));
}
}
}
@@ -3857,13 +3857,13 @@ TEST(RunDeadInt32Binops) {
m.machine()->Uint32LessThanOrEqual()};
for (size_t i = 0; i < arraysize(kOps); ++i) {
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ RawMachineAssemblerTester<int32_t> t(MachineType::Int32(),
MachineType::Int32());
int32_t constant = static_cast<int32_t>(0x55555 + i);
- m.AddNode(kOps[i], m.Parameter(0), m.Parameter(1));
- m.Return(m.Int32Constant(constant));
+ t.AddNode(kOps[i], t.Parameter(0), t.Parameter(1));
+ t.Return(t.Int32Constant(constant));
- CHECK_EQ(constant, m.Call(1, 1));
+ CHECK_EQ(constant, t.Call(1, 1));
}
}
@@ -3985,11 +3985,11 @@ TEST(RunDeadFloat32Binops) {
nullptr};
for (int i = 0; ops[i] != nullptr; i++) {
- RawMachineAssemblerTester<int32_t> m;
+ RawMachineAssemblerTester<int32_t> t;
int constant = 0x53355 + i;
- m.AddNode(ops[i], m.Float32Constant(0.1f), m.Float32Constant(1.11f));
- m.Return(m.Int32Constant(constant));
- CHECK_EQ(constant, m.Call());
+ t.AddNode(ops[i], t.Float32Constant(0.1f), t.Float32Constant(1.11f));
+ t.Return(t.Int32Constant(constant));
+ CHECK_EQ(constant, t.Call());
}
}
@@ -4002,11 +4002,11 @@ TEST(RunDeadFloat64Binops) {
m.machine()->Float64Mod(), nullptr};
for (int i = 0; ops[i] != nullptr; i++) {
- RawMachineAssemblerTester<int32_t> m;
+ RawMachineAssemblerTester<int32_t> t;
int constant = 0x53355 + i;
- m.AddNode(ops[i], m.Float64Constant(0.1), m.Float64Constant(1.11));
- m.Return(m.Int32Constant(constant));
- CHECK_EQ(constant, m.Call());
+ t.AddNode(ops[i], t.Float64Constant(0.1), t.Float64Constant(1.11));
+ t.Return(t.Int32Constant(constant));
+ CHECK_EQ(constant, t.Call());
}
}
@@ -5471,12 +5471,12 @@ TEST(RunFloat64UnorderedCompare) {
FOR_FLOAT64_INPUTS(i) {
for (size_t o = 0; o < arraysize(operators); ++o) {
for (int j = 0; j < 2; j++) {
- RawMachineAssemblerTester<int32_t> m;
- Node* a = m.Float64Constant(i);
- Node* b = m.Float64Constant(nan);
+ RawMachineAssemblerTester<int32_t> t;
+ Node* a = t.Float64Constant(i);
+ Node* b = t.Float64Constant(nan);
if (j == 1) std::swap(a, b);
- m.Return(m.AddNode(operators[o], a, b));
- CHECK_EQ(0, m.Call());
+ t.Return(t.AddNode(operators[o], a, b));
+ CHECK_EQ(0, t.Call());
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index f90b86dbf6..6f46ed6ff2 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -949,10 +949,9 @@ static void Build_Select_With_Call(CallDescriptor* desc,
// Build the actual select.
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
Graph graph(&zone);
- RawMachineAssembler raw(isolate, &graph, desc);
- raw.Return(raw.Parameter(which));
- inner =
- CompileGraph("Select-indirection", desc, &graph, raw.ExportForTest());
+ RawMachineAssembler r(isolate, &graph, desc);
+ r.Return(r.Parameter(which));
+ inner = CompileGraph("Select-indirection", desc, &graph, r.ExportForTest());
CHECK(!inner.is_null());
CHECK(inner->IsCode());
}
@@ -1044,8 +1043,8 @@ void MixedParamTest(int start) {
Handle<Code> select;
{
// build the select.
- Zone zone(&allocator, ZONE_NAME, kCompressGraphZone);
- Graph graph(&zone);
+ Zone select_zone(&allocator, ZONE_NAME, kCompressGraphZone);
+ Graph graph(&select_zone);
RawMachineAssembler raw(isolate, &graph, desc);
raw.Return(raw.Parameter(which));
select = CompileGraph("Compute", desc, &graph, raw.ExportForTest());
@@ -1061,12 +1060,13 @@ void MixedParamTest(int start) {
CSignatureOf<int32_t> csig;
{
// Wrap the select code with a callable function that passes constants.
- Zone zone(&allocator, ZONE_NAME, kCompressGraphZone);
- Graph graph(&zone);
- CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+ Zone wrap_zone(&allocator, ZONE_NAME, kCompressGraphZone);
+ Graph graph(&wrap_zone);
+ CallDescriptor* cdesc =
+ Linkage::GetSimplifiedCDescriptor(&wrap_zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
Node* target = raw.HeapConstant(select);
- Node** inputs = zone.NewArray<Node*>(num_params + 1);
+ Node** inputs = wrap_zone.NewArray<Node*>(num_params + 1);
int input_count = 0;
inputs[input_count++] = target;
int64_t constant = 0x0102030405060708;
diff --git a/deps/v8/test/cctest/compiler/test-sloppy-equality.cc b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
index 7533000afb..a521001c82 100644
--- a/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
+++ b/deps/v8/test/cctest/compiler/test-sloppy-equality.cc
@@ -59,8 +59,8 @@ class TestSloppyEqualityFactory {
TEST(TestSloppyEquality) {
FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true);
FlagScope<bool> always_opt(&i::FLAG_always_opt, false);
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
+ HandleAndZoneScope handle_and_zone_scope;
+ Isolate* isolate = handle_and_zone_scope.main_isolate();
Zone zone(isolate->allocator(), ZONE_NAME);
TestSloppyEqualityFactory f(&zone);
// TODO(nicohartmann@, v8:5660): Collect more precise feedback for some useful
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index c8cd9833ff..001499349a 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -357,6 +357,16 @@ inline base::Vector<const int64_t> ValueHelper::GetVector() {
return int64_vector();
}
+template <>
+inline base::Vector<const float> ValueHelper::GetVector() {
+ return float32_vector();
+}
+
+template <>
+inline base::Vector<const double> ValueHelper::GetVector() {
+ return float64_vector();
+}
+
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... i ... }
#define FOR_INPUTS(ctype, itype, var) \
for (ctype var : ::v8::internal::compiler::ValueHelper::itype##_vector())
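The two new GetVector specializations let templated helpers iterate the canonical floating-point inputs the same way they already could for the integer types. A hypothetical caller, assuming it lives next to the helpers in v8::internal::compiler and uses the USE() macro from src/base/macros.h:

template <typename CType>
void TraverseInputs() {
  for (CType input : ValueHelper::GetVector<CType>()) {
    USE(input);  // a real test would feed {input} into generated code
  }
}
// TraverseInputs<float>() and TraverseInputs<double>() now compile alongside
// the existing integer instantiations.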
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 6465c74870..e91ecd150b 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -15,22 +15,22 @@
namespace {
bool IsTrackedYoung(i::Heap* heap, i::ArrayBufferExtension* extension) {
- bool in_young = heap->array_buffer_sweeper()->young().Contains(extension);
- bool in_old = heap->array_buffer_sweeper()->old().Contains(extension);
+ bool in_young = heap->array_buffer_sweeper()->young().ContainsSlow(extension);
+ bool in_old = heap->array_buffer_sweeper()->old().ContainsSlow(extension);
CHECK(!(in_young && in_old));
return in_young;
}
bool IsTrackedOld(i::Heap* heap, i::ArrayBufferExtension* extension) {
- bool in_young = heap->array_buffer_sweeper()->young().Contains(extension);
- bool in_old = heap->array_buffer_sweeper()->old().Contains(extension);
+ bool in_young = heap->array_buffer_sweeper()->young().ContainsSlow(extension);
+ bool in_old = heap->array_buffer_sweeper()->old().ContainsSlow(extension);
CHECK(!(in_young && in_old));
return in_old;
}
bool IsTracked(i::Heap* heap, i::ArrayBufferExtension* extension) {
- bool in_young = heap->array_buffer_sweeper()->young().Contains(extension);
- bool in_old = heap->array_buffer_sweeper()->old().Contains(extension);
+ bool in_young = heap->array_buffer_sweeper()->young().ContainsSlow(extension);
+ bool in_old = heap->array_buffer_sweeper()->old().ContainsSlow(extension);
CHECK(!(in_young && in_old));
return in_young || in_old;
}
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index a6a5ba7a74..82e210af0c 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -40,7 +40,7 @@
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/execution.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/factory.h"
#include "src/heap/gc-tracer.h"
@@ -60,7 +60,7 @@
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
-#include "src/objects/managed.h"
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/stack-frame-info-inl.h"
@@ -7318,7 +7318,6 @@ TEST(Regress11181) {
v8::HandleScope scope(CcTest::isolate());
GenerateGarbage();
CcTest::CollectAllAvailableGarbage();
- TracingFlags::runtime_stats.store(0, std::memory_order_relaxed);
}
TEST(LongTaskStatsFullAtomic) {
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 1f8bc11982..0f0f5fe5af 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -175,7 +175,8 @@ TEST(MemoryChunk) {
base::BoundedPageAllocator code_page_allocator(
page_allocator, code_range_reservation.address(),
- code_range_reservation.size(), MemoryChunk::kAlignment);
+ code_range_reservation.size(), MemoryChunk::kAlignment,
+ base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);
VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, EXECUTABLE, heap->code_space());
@@ -355,7 +356,7 @@ TEST(OldLargeObjectSpace) {
#ifndef DEBUG
// The test verifies that the committed size of a space is less than some threshold.
// Debug builds pull in all sorts of additional instrumentation that increases
-// heap sizes. E.g. CSA_ASSERT creates on-heap strings for error messages. These
+// heap sizes. E.g. CSA_DCHECK creates on-heap strings for error messages. These
// messages are also not stable if files are moved and modified during the build
// process (jumbo builds).
TEST(SizeOfInitialHeap) {
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 93454b22f2..f7f3bff26e 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -17,69 +17,33 @@ namespace v8 {
namespace internal {
namespace interpreter {
-#define XSTR(A) #A
-#define STR(A) XSTR(A)
-
-#define UNIQUE_VAR() "var a" STR(__COUNTER__) " = 0;\n"
-
-#define LOAD_UNIQUE_PROPERTY() " b.name" STR(__COUNTER__) ";\n"
-
-#define REPEAT_2(...) __VA_ARGS__ __VA_ARGS__
-#define REPEAT_4(...) REPEAT_2(__VA_ARGS__) REPEAT_2(__VA_ARGS__)
-#define REPEAT_8(...) REPEAT_4(__VA_ARGS__) REPEAT_4(__VA_ARGS__)
-#define REPEAT_16(...) REPEAT_8(__VA_ARGS__) REPEAT_8(__VA_ARGS__)
-#define REPEAT_32(...) REPEAT_16(__VA_ARGS__) REPEAT_16(__VA_ARGS__)
-#define REPEAT_64(...) REPEAT_32(__VA_ARGS__) REPEAT_32(__VA_ARGS__)
-#define REPEAT_128(...) REPEAT_64(__VA_ARGS__) REPEAT_64(__VA_ARGS__)
-#define REPEAT_256(...) REPEAT_128(__VA_ARGS__) REPEAT_128(__VA_ARGS__)
-
-#define REPEAT_127(...) \
- REPEAT_64(__VA_ARGS__) \
- REPEAT_32(__VA_ARGS__) \
- REPEAT_16(__VA_ARGS__) \
- REPEAT_8(__VA_ARGS__) \
- REPEAT_4(__VA_ARGS__) \
- REPEAT_2(__VA_ARGS__) \
- __VA_ARGS__
-
-#define REPEAT_249(...) \
- REPEAT_127(__VA_ARGS__) \
- REPEAT_64(__VA_ARGS__) \
- REPEAT_32(__VA_ARGS__) \
- REPEAT_16(__VA_ARGS__) \
- REPEAT_8(__VA_ARGS__) \
- REPEAT_2(__VA_ARGS__)
-
-#define REPEAT_2_UNIQUE_VARS() UNIQUE_VAR() UNIQUE_VAR()
-#define REPEAT_4_UNIQUE_VARS() REPEAT_2_UNIQUE_VARS() REPEAT_2_UNIQUE_VARS()
-#define REPEAT_8_UNIQUE_VARS() REPEAT_4_UNIQUE_VARS() REPEAT_4_UNIQUE_VARS()
-#define REPEAT_16_UNIQUE_VARS() REPEAT_8_UNIQUE_VARS() REPEAT_8_UNIQUE_VARS()
-#define REPEAT_32_UNIQUE_VARS() REPEAT_16_UNIQUE_VARS() REPEAT_16_UNIQUE_VARS()
-#define REPEAT_64_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS()
-#define REPEAT_128_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS()
-
-#define REPEAT_252_UNIQUE_VARS() \
- REPEAT_128_UNIQUE_VARS() \
- REPEAT_64_UNIQUE_VARS() \
- REPEAT_32_UNIQUE_VARS() \
- REPEAT_16_UNIQUE_VARS() \
- REPEAT_8_UNIQUE_VARS() \
- REPEAT_4_UNIQUE_VARS()
-
-#define REPEAT_2_LOAD_UNIQUE_PROPERTY() \
- LOAD_UNIQUE_PROPERTY() LOAD_UNIQUE_PROPERTY()
-#define REPEAT_4_LOAD_UNIQUE_PROPERTY() \
- REPEAT_2_LOAD_UNIQUE_PROPERTY() REPEAT_2_LOAD_UNIQUE_PROPERTY()
-#define REPEAT_8_LOAD_UNIQUE_PROPERTY() \
- REPEAT_4_LOAD_UNIQUE_PROPERTY() REPEAT_4_LOAD_UNIQUE_PROPERTY()
-#define REPEAT_16_LOAD_UNIQUE_PROPERTY() \
- REPEAT_8_LOAD_UNIQUE_PROPERTY() REPEAT_8_LOAD_UNIQUE_PROPERTY()
-#define REPEAT_32_LOAD_UNIQUE_PROPERTY() \
- REPEAT_16_LOAD_UNIQUE_PROPERTY() REPEAT_16_LOAD_UNIQUE_PROPERTY()
-#define REPEAT_64_LOAD_UNIQUE_PROPERTY() \
- REPEAT_32_LOAD_UNIQUE_PROPERTY() REPEAT_32_LOAD_UNIQUE_PROPERTY()
-#define REPEAT_128_LOAD_UNIQUE_PROPERTY() \
- REPEAT_64_LOAD_UNIQUE_PROPERTY() REPEAT_64_LOAD_UNIQUE_PROPERTY()
+int global_counter = 0; // For unique variable/property names.
+
+std::string LoadUniqueProperties(int n) {
+ // Don't take any fancy recursive shortcuts here: {global_counter} must be
+ // incremented exactly {n} times, one unique property load per iteration.
+ std::string result;
+ for (int i = 0; i < n; i++) {
+ result += " b.name" + std::to_string(global_counter++) + ";\n";
+ }
+ return result;
+}
+
+std::string UniqueVars(int n) {
+ std::string result;
+ for (int i = 0; i < n; i++) {
+ result += "var a" + std::to_string(global_counter++) + " = 0;\n";
+ }
+ return result;
+}
+
+std::string Repeat(std::string s, int n) {
+ // All current callers pass n >= 1; guard against n <= 0, which would
+ // otherwise recurse forever.
+ if (n <= 0) return std::string();
+ if (n == 1) return s;
+ std::string half = Repeat(s, n >> 1);
+ std::string result = half + half;
+ if (n & 1) result += s;
+ return result;
+}
static const char* kGoldenFileDirectory =
"test/cctest/interpreter/bytecode_expectations/";
@@ -115,11 +79,11 @@ std::string LoadGolden(const std::string& golden_filename) {
template <size_t N>
std::string BuildActual(const BytecodeExpectationsPrinter& printer,
- const char* (&snippet_list)[N],
+ std::string (&snippet_list)[N],
const char* prologue = nullptr,
const char* epilogue = nullptr) {
std::ostringstream actual_stream;
- for (const char* snippet : snippet_list) {
+ for (std::string snippet : snippet_list) {
std::string source_code;
if (prologue) source_code += prologue;
source_code += snippet;
@@ -192,7 +156,7 @@ bool CompareTexts(const std::string& generated, const std::string& expected) {
TEST(PrimitiveReturnStatements) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"",
"return;\n",
@@ -223,7 +187,7 @@ TEST(PrimitiveReturnStatements) {
TEST(PrimitiveExpressions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var x = 0; return x;\n",
"var x = 0; return x + 3;\n",
@@ -280,7 +244,7 @@ TEST(PrimitiveExpressions) {
TEST(LogicalExpressions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var x = 0; return x || 3;\n",
"var x = 0; return (x == 1) || 3;\n",
@@ -293,21 +257,23 @@ TEST(LogicalExpressions) {
"var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);\n",
- "var x = 1; var a = 2, b = 3; return x || (" //
- REPEAT_32("\n a = 1, b = 2, ") //
+ // clang-format off
+ "var x = 1; var a = 2, b = 3; return x || (" +
+ Repeat("\n a = 1, b = 2, ", 32) +
"3);\n",
- "var x = 0; var a = 2, b = 3; return x && (" //
- REPEAT_32("\n a = 1, b = 2, ") //
+ "var x = 0; var a = 2, b = 3; return x && (" +
+ Repeat("\n a = 1, b = 2, ", 32) +
"3);\n",
- "var x = 1; var a = 2, b = 3; return (x > 3) || (" //
- REPEAT_32("\n a = 1, b = 2, ") //
+ "var x = 1; var a = 2, b = 3; return (x > 3) || (" +
+ Repeat("\n a = 1, b = 2, ", 32) +
"3);\n",
- "var x = 0; var a = 2, b = 3; return (x < 5) && (" //
- REPEAT_32("\n a = 1, b = 2, ") //
+ "var x = 0; var a = 2, b = 3; return (x < 5) && (" +
+ Repeat("\n a = 1, b = 2, ", 32) +
"3);\n",
+ // clang-format on
"return 0 && 3;\n",
@@ -326,7 +292,7 @@ TEST(Parameters) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f() { return this; }",
"function f(arg1) { return arg1; }",
@@ -349,7 +315,7 @@ TEST(Parameters) {
TEST(IntegerConstants) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return 12345678;\n",
"var a = 1234; return 5678;\n",
@@ -364,7 +330,7 @@ TEST(IntegerConstants) {
TEST(HeapNumberConstants) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return 1.2;\n",
"var a = 1.2; return 2.6;\n",
@@ -379,7 +345,7 @@ TEST(HeapNumberConstants) {
TEST(StringConstants) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return \"This is a string\";\n",
"var a = \"First string\"; return \"Second string\";\n",
@@ -397,7 +363,8 @@ TEST(PropertyLoads) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ // clang-format off
+ std::string snippets[] = {
"function f(a) { return a.name; }\n"
"f({name : \"test\"});\n",
@@ -414,20 +381,21 @@ TEST(PropertyLoads) {
"f({\"-124\" : \"test\", name : 123 })",
"function f(a) {\n"
- " var b = {};\n"
- REPEAT_128_LOAD_UNIQUE_PROPERTY()
+ " var b = {};\n" +
+ LoadUniqueProperties(128) +
" return a.name;\n"
"}\n"
"f({name : \"test\"})\n",
"function f(a, b) {\n"
" var c;\n"
- " c = a[b];\n"
- REPEAT_127(" c = a[b];\n")
+ " c = a[b];\n" +
+ Repeat(" c = a[b];\n", 127) +
" return a[b];\n"
"}\n"
"f({name : \"test\"}, \"name\")\n",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyLoads.golden")));
@@ -439,7 +407,7 @@ TEST(PropertyLoadStore) {
printer.set_wrap(false);
printer.set_top_level(true);
- const char* snippets[] = {
+ std::string snippets[] = {
R"(
l = {
'aa': 1.1,
@@ -475,7 +443,7 @@ TEST(IIFE) {
printer.set_top_level(true);
printer.set_print_callee(true);
- const char* snippets[] = {
+ std::string snippets[] = {
R"(
(function() {
l = {};
@@ -543,7 +511,12 @@ TEST(PropertyStores) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ // For historical reasons, this test expects the first unique identifier
+ // to be 128.
+ global_counter = 128;
+
+ // clang-format off
+ std::string snippets[] = {
"function f(a) { a.name = \"val\"; }\n"
"f({name : \"test\"})",
@@ -567,8 +540,8 @@ TEST(PropertyStores) {
"function f(a) {\n"
" a.name = 1;\n"
- " var b = {};\n"
- REPEAT_128_LOAD_UNIQUE_PROPERTY()
+ " var b = {};\n" +
+ LoadUniqueProperties(128) +
" a.name = 2;\n"
"}\n"
"f({name : \"test\"})\n",
@@ -576,27 +549,28 @@ TEST(PropertyStores) {
"function f(a) {\n"
" 'use strict';\n"
" a.name = 1;\n"
- " var b = {};\n"
- REPEAT_128_LOAD_UNIQUE_PROPERTY()
+ " var b = {};\n" +
+ LoadUniqueProperties(128) +
" a.name = 2;\n"
"}\n"
"f({name : \"test\"})\n",
"function f(a, b) {\n"
- " a[b] = 1;\n"
- REPEAT_127(" a[b] = 1;\n")
+ " a[b] = 1;\n" +
+ Repeat(" a[b] = 1;\n", 127) +
" a[b] = 2;\n"
"}\n"
"f({name : \"test\"})\n",
"function f(a, b) {\n"
" 'use strict';\n"
- " a[b] = 1;\n"
- REPEAT_127(" a[b] = 1;\n")
+ " a[b] = 1;\n" +
+ Repeat(" a[b] = 1;\n", 127) +
" a[b] = 2;\n"
"}\n"
"f({name : \"test\"})\n",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyStores.golden")));
@@ -610,7 +584,12 @@ TEST(PropertyCall) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ // For historical reasons, this test expects the first unique identifier
+ // to be 384.
+ global_counter = 384;
+
+ // clang-format off
+ std::string snippets[] = {
"function f(a) { return a.func(); }\n"
"f(" FUNC_ARG ")",
@@ -621,15 +600,16 @@ TEST(PropertyCall) {
"f(" FUNC_ARG ", 1)",
"function f(a) {\n"
- " var b = {};\n"
- REPEAT_128_LOAD_UNIQUE_PROPERTY()
- " a.func;\n" //
+ " var b = {};\n" +
+ LoadUniqueProperties(128) +
+ " a.func;\n"
" return a.func(); }\n"
"f(" FUNC_ARG ")",
"function f(a) { return a.func(1).func(2).func(3); }\n"
"f(new (function Obj() { this.func = function(a) { return this; }})())",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyCall.golden")));
@@ -641,7 +621,12 @@ TEST(LoadGlobal) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ // For historical reasons, this test expects the first unique identifier
+ // to be 512.
+ global_counter = 512;
+
+ // clang-format off
+ std::string snippets[] = {
"var a = 1;\n"
"function f() { return a; }\n"
"f()",
@@ -656,12 +641,13 @@ TEST(LoadGlobal) {
"a = 1;\n"
"function f(c) {\n"
- " var b = {};\n"
- REPEAT_128_LOAD_UNIQUE_PROPERTY()
+ " var b = {};\n" +
+ LoadUniqueProperties(128) +
" return a;\n"
"}\n"
"f({name: 1});\n",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("LoadGlobal.golden")));
@@ -673,7 +659,12 @@ TEST(StoreGlobal) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ // For historical reasons, this test expects the first unique identifier
+ // to be 640.
+ global_counter = 640;
+
+ // clang-format off
+ std::string snippets[] = {
"var a = 1;\n"
"function f() { a = 2; }\n"
"f();\n",
@@ -691,8 +682,8 @@ TEST(StoreGlobal) {
"a = 1;\n"
"function f(c) {\n"
- " var b = {};\n"
- REPEAT_128_LOAD_UNIQUE_PROPERTY()
+ " var b = {};\n" +
+ LoadUniqueProperties(128) +
" a = 2;\n"
"}\n"
"f({name: 1});\n",
@@ -700,12 +691,13 @@ TEST(StoreGlobal) {
"a = 1;\n"
"function f(c) {\n"
" 'use strict';\n"
- " var b = {};\n"
- REPEAT_128_LOAD_UNIQUE_PROPERTY()
+ " var b = {};\n" +
+ LoadUniqueProperties(128) +
" a = 2;\n"
"}\n"
"f({name: 1});\n",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("StoreGlobal.golden")));
@@ -717,7 +709,7 @@ TEST(CallGlobal) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function t() { }\n"
"function f() { return t(); }\n"
"f();\n",
@@ -737,7 +729,7 @@ TEST(CallRuntime) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f() { %TheHole() }\n"
"f();\n",
@@ -758,7 +750,8 @@ TEST(IfConditions) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ // clang-format off
+ std::string snippets[] = {
"function f() {\n"
" if (0) {\n"
" return 1;\n"
@@ -815,14 +808,14 @@ TEST(IfConditions) {
"function f(a, b) { if (a in b) { return 200; } }"
"f('prop', { prop: 'yes'});\n",
- "function f(z) { var a = 0; var b = 0; if (a === 0.01) {\n"
- REPEAT_64(" b = a; a = b;\n")
+ "function f(z) { var a = 0; var b = 0; if (a === 0.01) {\n" +
+ Repeat(" b = a; a = b;\n", 64) +
" return 200; } else { return -200; } } f(0.001);\n",
"function f() {\n"
" var a = 0; var b = 0;\n"
- " if (a) {\n"
- REPEAT_64(" b = a; a = b;\n")
+ " if (a) {\n" +
+ Repeat(" b = a; a = b;\n", 64) +
" return 200; } else { return -200; }\n"
"};\n"
"f();\n",
@@ -861,6 +854,7 @@ TEST(IfConditions) {
"};\n"
"f(-1, 1);\n",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IfConditions.golden")));
@@ -873,7 +867,7 @@ TEST(DeclareGlobals) {
printer.set_test_function_name("f");
printer.set_top_level(true);
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1;\n",
"function f() {}\n",
@@ -893,7 +887,7 @@ TEST(BreakableBlocks) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var x = 0;\n"
"label: {\n"
" x = x + 1;\n"
@@ -938,7 +932,7 @@ TEST(BreakableBlocks) {
TEST(BasicLoops) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var x = 0;\n"
"while (false) { x = 99; break; continue; }\n"
"return x;\n",
@@ -1091,7 +1085,7 @@ TEST(BasicLoops) {
TEST(UnaryOperators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var x = 0;\n"
"while (x != 10) {\n"
" x = x + 10;\n"
@@ -1131,7 +1125,7 @@ TEST(Typeof) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f() {\n"
" var x = 13;\n"
" return typeof(x);\n"
@@ -1151,7 +1145,7 @@ TEST(CompareTypeOf) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return typeof(1) === 'number';\n",
"return 'string' === typeof('foo');\n",
@@ -1171,7 +1165,7 @@ TEST(CompareNil) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1;\n"
"return a === null;\n",
@@ -1226,7 +1220,7 @@ TEST(Delete) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = {x:13, y:14}; return delete a.x;\n",
"'use strict'; var a = {x:13, y:14}; return delete a.x;\n",
@@ -1255,7 +1249,7 @@ TEST(GlobalDelete) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = {x:13, y:14};\n"
"function f() {\n"
" return delete a.x;\n"
@@ -1290,7 +1284,7 @@ TEST(FunctionLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return function(){ }\n",
"return (function(){ })()\n",
@@ -1306,7 +1300,7 @@ TEST(RegExpLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return /ab+d/;\n",
"return /(\\w+)\\s(\\w+)/i;\n",
@@ -1322,7 +1316,7 @@ TEST(ArrayLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return [ 1, 2 ];\n",
"var a = 1; return [ a, a + 1 ];\n",
@@ -1346,7 +1340,7 @@ TEST(ObjectLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return { };\n",
"return { name: 'string', val: 9.2 };\n",
@@ -1389,7 +1383,7 @@ TEST(TopLevelObjectLiterals) {
printer.set_test_function_name("f");
printer.set_top_level(true);
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = { func: function() { } };\n",
};
@@ -1401,7 +1395,7 @@ TEST(TryCatch) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"try { return 1; } catch(e) { return 2; }\n",
"var a;\n"
@@ -1416,7 +1410,7 @@ TEST(TryCatch) {
TEST(TryFinally) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1;\n"
"try { a = 2; } finally { a = 3; }\n",
@@ -1435,7 +1429,7 @@ TEST(TryFinally) {
TEST(Throw) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"throw 1;\n",
"throw 'Error';\n",
@@ -1453,7 +1447,7 @@ TEST(CallNew) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function bar() { this.value = 0; }\n"
"function f() { return new bar(); }\n"
"f();\n",
@@ -1482,9 +1476,14 @@ TEST(ContextVariables) {
// of unique variables to trigger the wide slot load / store.
STATIC_ASSERT(Context::MIN_CONTEXT_EXTENDED_SLOTS + 3 + 250 == 256);
+ // For historical reasons, this test expects the first unique identifier
+ // to be 896.
+ global_counter = 896;
+
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ // clang-format off
+ std::string snippets[] = {
"var a; return function() { a = 1; };\n",
"var a = 1; return function() { a = 2; };\n",
@@ -1497,12 +1496,13 @@ TEST(ContextVariables) {
"let a = 1;\n"
"{ let b = 2; return function() { a + b; }; }\n",
- "'use strict';\n"
- REPEAT_252_UNIQUE_VARS()
+ "'use strict';\n" +
+ UniqueVars(252) +
"eval();\n"
"var b = 100;\n"
"return b\n",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ContextVariables.golden")));
@@ -1514,7 +1514,7 @@ TEST(ContextParameters) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f(arg1) { return function() { arg1 = 2; }; }",
"function f(arg1) { var a = function() { arg1 = 2; }; return arg1; }",
@@ -1534,7 +1534,7 @@ TEST(OuterContextVariables) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function Outer() {\n"
" var outerVar = 1;\n"
" function Inner(innerArg) {\n"
@@ -1561,7 +1561,7 @@ TEST(OuterContextVariables) {
TEST(CountOperators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1; return ++a;\n",
"var a = 1; return a++;\n",
@@ -1595,7 +1595,7 @@ TEST(GlobalCountOperators) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"var global = 1;\n"
"function f() { return ++global; }\n"
"f();\n",
@@ -1620,7 +1620,7 @@ TEST(GlobalCountOperators) {
TEST(CompoundExpressions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1; a += 2;\n",
"var a = 1; a /= 2;\n",
@@ -1642,7 +1642,7 @@ TEST(GlobalCompoundExpressions) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"var global = 1;\n"
"function f() { return global &= 1; }\n"
"f();\n",
@@ -1662,7 +1662,7 @@ TEST(CreateArguments) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f() { return arguments; }",
"function f() { return arguments[0]; }",
@@ -1686,7 +1686,7 @@ TEST(CreateRestParameter) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f(...restArgs) { return restArgs; }",
"function f(a, ...restArgs) { return restArgs; }",
@@ -1703,7 +1703,7 @@ TEST(CreateRestParameter) {
TEST(ForIn) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"for (var p in null) {}\n",
"for (var p in undefined) {}\n",
@@ -1733,7 +1733,7 @@ TEST(ForIn) {
TEST(ForOf) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"for (var p of [0, 1, 2]) {}\n",
"var x = 'potatoes';\n"
@@ -1755,7 +1755,7 @@ TEST(ForOf) {
TEST(Conditional) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return 1 ? 2 : 3;\n",
"return 1 ? 2 ? 3 : 4 : 5;\n",
@@ -1773,7 +1773,8 @@ TEST(Conditional) {
TEST(Switch) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ // clang-format off
+ std::string snippets[] = {
"var a = 1;\n"
"switch(a) {\n"
" case 1: return 2;\n"
@@ -1814,8 +1815,8 @@ TEST(Switch) {
"var a = 1;\n"
"switch(a) {\n"
- " case 1:\n"
- REPEAT_64(" a = 2;\n")
+ " case 1:\n" +
+ Repeat(" a = 2;\n", 64) +
" break;\n"
" case 2:\n"
" a = 3;\n"
@@ -1832,6 +1833,7 @@ TEST(Switch) {
" case 2: a = 3;\n"
"}\n",
};
+ // clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("Switch.golden")));
@@ -1840,7 +1842,7 @@ TEST(Switch) {
TEST(BasicBlockToBoolean) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1; if (a || a < 0) { return 1; }\n",
"var a = 1; if (a && a < 0) { return 1; }\n",
@@ -1855,7 +1857,7 @@ TEST(BasicBlockToBoolean) {
TEST(DeadCodeRemoval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return; var a = 1; a();\n",
"if (false) { return; }; var a = 1;\n",
@@ -1875,7 +1877,7 @@ TEST(ThisFunction) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"var f;\n"
"f = function f() {};",
@@ -1891,7 +1893,7 @@ TEST(NewTarget) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return new.target;\n",
"new.target;\n",
@@ -1904,7 +1906,7 @@ TEST(NewTarget) {
TEST(RemoveRedundantLdar) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var ld_a = 1;\n" // This test is to check Ldar does not
"while(true) {\n" // get removed if the preceding Star is
" ld_a = ld_a + ld_a;\n" // in a different basicblock.
@@ -1931,7 +1933,7 @@ TEST(RemoveRedundantLdar) {
TEST(GenerateTestUndetectable) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a == null) { b = 20;}\n"
@@ -1979,7 +1981,7 @@ TEST(GenerateTestUndetectable) {
TEST(AssignmentsInBinaryExpression) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var x = 0, y = 1;\n"
"return (x = 2, y = 3, x = 4, y = 5);\n",
@@ -2019,7 +2021,7 @@ TEST(AssignmentsInBinaryExpression) {
TEST(DestructuringAssignment) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var x, a = [0,1,2,3];\n"
"[x] = a;\n",
@@ -2046,7 +2048,7 @@ TEST(DestructuringAssignment) {
TEST(Eval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"return eval('1;');\n",
};
@@ -2060,7 +2062,7 @@ TEST(LookupSlot) {
printer.set_test_function_name("f");
// clang-format off
- const char* snippets[] = {
+ std::string snippets[] = {
"eval('var x = 10;'); return x;\n",
"eval('var x = 10;'); return typeof x;\n",
@@ -2090,7 +2092,7 @@ TEST(LookupSlot) {
TEST(CallLookupSlot) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"g = function(){}; eval(''); return g();\n",
};
@@ -2106,7 +2108,7 @@ TEST(LookupSlotInEval) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"return x;",
"x = 10;",
@@ -2135,7 +2137,7 @@ TEST(DeleteLookupSlotInEval) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"delete x;",
"return delete y;",
@@ -2168,7 +2170,7 @@ TEST(WideRegisters) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"x0 = x127;\n"
"return x0;\n",
@@ -2210,7 +2212,7 @@ TEST(WideRegisters) {
TEST(ConstVariable) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"const x = 10;\n",
"const x = 10; return x;\n",
@@ -2227,7 +2229,7 @@ TEST(ConstVariable) {
TEST(LetVariable) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"let x = 10;\n",
"let x = 10; return x;\n",
@@ -2246,7 +2248,7 @@ TEST(ConstVariableContextSlot) {
// TODO(mythria): Add tests that walk the context chain.
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"const x = 10; function f1() {return x;}\n",
"const x = 10; function f1() {return x;} return x;\n",
@@ -2263,7 +2265,7 @@ TEST(ConstVariableContextSlot) {
TEST(LetVariableContextSlot) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"let x = 10; function f1() {return x;}\n",
"let x = 10; function f1() {return x;} return x;\n",
@@ -2280,7 +2282,7 @@ TEST(LetVariableContextSlot) {
TEST(WithStatement) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"with ({x:42}) { return x; }\n",
};
@@ -2291,7 +2293,7 @@ TEST(WithStatement) {
TEST(DoDebugger) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"debugger;\n",
};
@@ -2302,7 +2304,7 @@ TEST(DoDebugger) {
TEST(ClassDeclarations) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"class Person {\n"
" constructor(name) { this.name = name; }\n"
" speak() { console.log(this.name + ' is speaking.'); }\n"
@@ -2337,7 +2339,7 @@ TEST(ClassAndSuperClass) {
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("test");
- const char* snippets[] = {
+ std::string snippets[] = {
"var test;\n"
"(function() {\n"
" class A {\n"
@@ -2394,7 +2396,7 @@ TEST(PublicClassFields) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"{\n"
" class A {\n"
" a;\n"
@@ -2445,7 +2447,7 @@ TEST(PrivateClassFields) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"{\n"
" class A {\n"
" #a;\n"
@@ -2504,7 +2506,7 @@ TEST(PrivateClassFieldAccess) {
printer.set_wrap(false);
printer.set_test_function_name("test");
- const char* snippets[] = {
+ std::string snippets[] = {
"class A {\n"
" #a;\n"
" #b;\n"
@@ -2538,7 +2540,7 @@ TEST(PrivateMethodDeclaration) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"{\n"
" class A {\n"
" #a() { return 1; }\n"
@@ -2571,7 +2573,7 @@ TEST(PrivateMethodAccess) {
printer.set_wrap(false);
printer.set_test_function_name("test");
- const char* snippets[] = {
+ std::string snippets[] = {
"class A {\n"
" #a() { return 1; }\n"
" constructor() { return this.#a(); }\n"
@@ -2614,7 +2616,7 @@ TEST(PrivateAccessorAccess) {
printer.set_wrap(false);
printer.set_test_function_name("test");
- const char* snippets[] = {
+ std::string snippets[] = {
"class A {\n"
" get #a() { return 1; }\n"
" set #a(val) { }\n"
@@ -2664,7 +2666,7 @@ TEST(StaticPrivateMethodDeclaration) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"{\n"
" class A {\n"
" static #a() { return 1; }\n"
@@ -2707,7 +2709,7 @@ TEST(StaticPrivateMethodAccess) {
printer.set_wrap(false);
printer.set_test_function_name("test");
- const char* snippets[] = {
+ std::string snippets[] = {
"class A {\n"
" static #a() { return 1; }\n"
" static test() { return this.#a(); }\n"
@@ -2782,7 +2784,7 @@ TEST(PrivateAccessorDeclaration) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"{\n"
" class A {\n"
" get #a() { return 1; }\n"
@@ -2838,7 +2840,7 @@ TEST(StaticClassFields) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"{\n"
" class A {\n"
" a;\n"
@@ -2901,7 +2903,7 @@ TEST(Generators) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function* f() { }\n"
"f();\n",
@@ -2926,7 +2928,7 @@ TEST(AsyncGenerators) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"async function* f() { }\n"
"f();\n",
@@ -2952,7 +2954,7 @@ TEST(Modules) {
printer.set_module(true);
printer.set_top_level(true);
- const char* snippets[] = {
+ std::string snippets[] = {
"import \"bar\";\n",
"import {foo} from \"bar\";\n",
@@ -2998,7 +3000,7 @@ TEST(AsyncModules) {
printer.set_module(true);
printer.set_top_level(true);
- const char* snippets[] = {
+ std::string snippets[] = {
"await 42;\n",
"await import(\"foo\");\n",
@@ -3023,7 +3025,7 @@ TEST(SuperCallAndSpread) {
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("test");
- const char* snippets[] = {
+ std::string snippets[] = {
"var test;\n"
"(function() {\n"
" class A {\n"
@@ -3063,7 +3065,7 @@ TEST(SuperCallAndSpread) {
TEST(CallAndSpread) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {"Math.max(...[1, 2, 3]);\n",
+ std::string snippets[] = {"Math.max(...[1, 2, 3]);\n",
"Math.max(0, ...[1, 2, 3]);\n",
"Math.max(0, ...[1, 2, 3], 4);\n"};
@@ -3074,7 +3076,7 @@ TEST(CallAndSpread) {
TEST(NewAndSpread) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"class A { constructor(...args) { this.args = args; } }\n"
"new A(...[1, 2, 3]);\n",
@@ -3094,7 +3096,7 @@ TEST(ForAwaitOf) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"async function f() {\n"
" for await (let x of [1, 2, 3]) {}\n"
"}\n"
@@ -3129,7 +3131,7 @@ TEST(StandardForLoop) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f() {\n"
" for (let x = 0; x < 10; ++x) { let y = x; }\n"
"}\n"
@@ -3180,7 +3182,7 @@ TEST(ForOfLoop) {
printer.set_wrap(false);
printer.set_test_function_name("f");
- const char* snippets[] = {
+ std::string snippets[] = {
"function f(arr) {\n"
" for (let x of arr) { let y = x; }\n"
"}\n"
@@ -3229,7 +3231,7 @@ TEST(StringConcat) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1;\n"
"var b = 2;\n"
"return a + b + 'string';\n",
@@ -3264,7 +3266,7 @@ TEST(TemplateLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
- const char* snippets[] = {
+ std::string snippets[] = {
"var a = 1;\n"
"var b = 2;\n"
"return `${a}${b}string`;\n",
@@ -3295,37 +3297,6 @@ TEST(TemplateLiterals) {
LoadGolden("TemplateLiterals.golden")));
}
-#undef XSTR
-#undef STR
-#undef UNIQUE_VAR
-#undef REPEAT_2
-#undef REPEAT_4
-#undef REPEAT_8
-#undef REPEAT_16
-#undef REPEAT_32
-#undef REPEAT_64
-#undef REPEAT_128
-#undef REPEAT_256
-#undef REPEAT_127
-#undef REPEAT_249
-#undef REPEAT_2_UNIQUE_VARS
-#undef REPEAT_4_UNIQUE_VARS
-#undef REPEAT_8_UNIQUE_VARS
-#undef REPEAT_16_UNIQUE_VARS
-#undef REPEAT_32_UNIQUE_VARS
-#undef REPEAT_64_UNIQUE_VARS
-#undef REPEAT_128_UNIQUE_VARS
-#undef REPEAT_252_UNIQUE_VARS
-#undef LOAD_UNIQUE_PROPERTY
-#undef REPEAT_2_LOAD_UNIQUE_PROPERTY
-#undef REPEAT_4_LOAD_UNIQUE_PROPERTY
-#undef REPEAT_8_LOAD_UNIQUE_PROPERTY
-#undef REPEAT_16_LOAD_UNIQUE_PROPERTY
-#undef REPEAT_32_LOAD_UNIQUE_PROPERTY
-#undef REPEAT_64_LOAD_UNIQUE_PROPERTY
-#undef REPEAT_128_LOAD_UNIQUE_PROPERTY
-#undef FUNC_ARG
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index c2f462a27a..eebe007177 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -373,8 +373,8 @@ TEST(Utf8ChunkBoundaries) {
v8::internal::ScannerStream::For(
&chunk_source, v8::ScriptCompiler::StreamedSource::UTF8));
- for (size_t i = 0; unicode_ucs2[i]; i++) {
- CHECK_EQ(unicode_ucs2[i], stream->Advance());
+ for (size_t j = 0; unicode_ucs2[j]; j++) {
+ CHECK_EQ(unicode_ucs2[j], stream->Advance());
}
CHECK_EQ(v8::internal::Utf16CharacterStream::kEndOfInput,
stream->Advance());
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 15f303ab7d..271c991948 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -316,7 +316,7 @@ THREADED_TEST(HandleScopePop) {
int count_before =
i::HandleScope::NumberOfHandles(reinterpret_cast<i::Isolate*>(isolate));
{
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
CompileRun(
"for (var i = 0; i < 1000; i++) {"
" obj.one;"
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
index 248a6bc04b..d472ebcf32 100644
--- a/deps/v8/test/cctest/test-api-array-buffer.cc
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -230,7 +230,18 @@ THREADED_TEST(ArrayBuffer_DetachingScript) {
CheckIsTypedArrayVarDetached("f32a");
CheckIsTypedArrayVarDetached("f64a");
- CHECK(CompileRun("dv.byteLength == 0 && dv.byteOffset == 0")->IsTrue());
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("dv.byteLength == 0 ");
+ CHECK(try_catch.HasCaught());
+ }
+
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("dv.byteOffset == 0");
+ CHECK(try_catch.HasCaught());
+ }
+
CheckDataViewIsDetached(dv);
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index f7cbc54499..61398f2c5e 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -2627,7 +2627,7 @@ THREADED_TEST(DescriptorInheritance2) {
v8::Local<v8::Script> script = v8_compile("o = new F()");
for (int i = 0; i < 100; i++) {
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
script->Run(env.local()).ToLocalChecked();
}
v8::Local<v8::Object> object = script->Run(env.local())
@@ -7857,7 +7857,7 @@ void InternalFieldCallback(bool global_gc) {
instance_templ->SetInternalFieldCount(2);
v8::Persistent<v8::Object> handle;
{
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
Local<v8::Object> obj = templ->GetFunction(env.local())
.ToLocalChecked()
->NewInstance(env.local())
@@ -9249,7 +9249,8 @@ THREADED_TEST(SecurityChecksForPrototypeChain) {
// Change context to be able to get to the Object function in the
// other context without hitting the security checks.
v8::Local<Value> other_object;
- { Context::Scope scope(other);
+ {
+ Context::Scope context_scope(other);
other_object =
other->Global()->Get(other, v8_str("Object")).ToLocalChecked();
CHECK(other->Global()->Set(other, v8_num(42), v8_num(87)).FromJust());
@@ -9293,7 +9294,8 @@ THREADED_TEST(SecurityChecksForPrototypeChain) {
// Now it gets hairy: Set the prototype for the other global object
// to be the current global object. The prototype chain for 'f' now
// goes through 'other' but ends up in the current global object.
- { Context::Scope scope(other);
+ {
+ Context::Scope context_scope(other);
CHECK(other->Global()
->Set(other, v8_str("__proto__"), current->Global())
.FromJust());
@@ -9589,7 +9591,7 @@ TEST(DetachGlobal) {
// Create a property on the global object in env2.
{
- v8::Context::Scope scope(env2);
+ v8::Context::Scope context_scope(env2);
CHECK(env2->Global()
->Set(env2, v8_str("p"), v8::Integer::New(env2->GetIsolate(), 42))
.FromJust());
@@ -9624,7 +9626,7 @@ TEST(DetachGlobal) {
// Create a property on the global object in env3.
{
- v8::Context::Scope scope(env3);
+ v8::Context::Scope context_scope(env3);
CHECK(env3->Global()
->Set(env3, v8_str("p"), v8::Integer::New(env3->GetIsolate(), 24))
.FromJust());
@@ -9667,7 +9669,7 @@ TEST(DetachedAccesses) {
.FromJust());
{
- v8::Context::Scope scope(env2);
+ v8::Context::Scope context_scope(env2);
CHECK(env2->Global()->Set(env2, v8_str("x"), v8_str("env2_x")).FromJust());
CompileRun(
"function bound_x() { return x; }"
@@ -9706,7 +9708,7 @@ TEST(DetachedAccesses) {
v8::Local<v8::ObjectTemplate>(), env2_global);
env2->SetSecurityToken(foo);
{
- v8::Context::Scope scope(env2);
+ v8::Context::Scope context_scope(env2);
CHECK(env2->Global()->Set(env2, v8_str("x"), v8_str("env3_x")).FromJust());
CHECK(env2->Global()->Set(env2, v8_str("env1"), env1->Global()).FromJust());
result = CompileRun(
@@ -10548,7 +10550,6 @@ THREADED_TEST(ObjectGetOwnPropertyNames) {
->Equals(context.local(), v8_str("length"))
.FromMaybe(false));
for (int i = 0; i < 4; ++i) {
- v8::Local<v8::Value> property;
CHECK(properties->Get(context.local(), i).ToLocal(&property) &&
property->IsInt32());
CHECK_EQ(property.As<v8::Int32>()->Value(), i);
@@ -10594,7 +10595,6 @@ THREADED_TEST(ObjectGetOwnPropertyNames) {
bool concat_found = false;
bool starts_with_found = false;
for (uint32_t i = 0; i < properties->Length(); ++i) {
- v8::Local<v8::Value> property;
CHECK(properties->Get(context.local(), i).ToLocal(&property));
if (!property->IsString()) continue;
if (!concat_found)
@@ -15316,8 +15316,9 @@ TEST(DefineProperty) {
v8::TryCatch try_catch(isolate);
// Use a writable descriptor, otherwise the next test, that changes
// the array length will fail.
- v8::PropertyDescriptor desc(v8_num(42), true);
- CHECK(arr->DefineProperty(env.local(), v8_str("length"), desc).FromJust());
+ v8::PropertyDescriptor desc_writable(v8_num(42), true);
+ CHECK(arr->DefineProperty(env.local(), v8_str("length"), desc_writable)
+ .FromJust());
CHECK(!try_catch.HasCaught());
}
@@ -15412,11 +15413,11 @@ TEST(DefineProperty) {
env->Global()->Get(env.local(), v8_str("get")).ToLocalChecked());
v8::Local<v8::Function> set = v8::Local<v8::Function>::Cast(
env->Global()->Get(env.local(), v8_str("set")).ToLocalChecked());
- v8::PropertyDescriptor desc(get, set);
+ v8::PropertyDescriptor desc_getter_setter(get, set);
p = v8_str("v7");
v8::TryCatch try_catch(isolate);
- CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(obj->DefineProperty(env.local(), p, desc_getter_setter).FromJust());
CHECK(!try_catch.HasCaught());
v8::Local<v8::Value> val = obj->Get(env.local(), p).ToLocalChecked();
@@ -15437,12 +15438,12 @@ TEST(DefineProperty) {
// Redefine an existing property.
// desc = {value: 42, enumerable: true}
- v8::PropertyDescriptor desc(v8_num(42));
- desc.set_enumerable(true);
+ v8::PropertyDescriptor desc42(v8_num(42));
+ desc42.set_enumerable(true);
p = v8_str("v8");
v8::TryCatch try_catch(isolate);
- CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(obj->DefineProperty(env.local(), p, desc42).FromJust());
CHECK(!try_catch.HasCaught());
// desc = {enumerable: true}
@@ -15476,11 +15477,11 @@ TEST(DefineProperty) {
env->Global()->Get(env.local(), v8_str("get")).ToLocalChecked());
// desc = {get: function() {}}
- v8::PropertyDescriptor desc(get, v8::Local<v8::Function>());
+ v8::PropertyDescriptor desc_getter(get, v8::Local<v8::Function>());
v8::TryCatch try_catch(isolate);
p = v8_str("v9");
- CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(obj->DefineProperty(env.local(), p, desc_getter).FromJust());
CHECK(!try_catch.HasCaught());
// desc_empty = {}
@@ -15491,7 +15492,7 @@ TEST(DefineProperty) {
// desc = {get: function() {}}
// Successful because we redefine the getter with its current value.
- CHECK(obj->DefineProperty(env.local(), p, desc).FromJust());
+ CHECK(obj->DefineProperty(env.local(), p, desc_getter).FromJust());
CHECK(!try_catch.HasCaught());
// desc = {get: undefined}
@@ -17345,7 +17346,7 @@ TEST(Regress528) {
// cache to the global object.
const char* source_simple = "1";
{
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
v8::Local<Context> context = Context::New(isolate);
context->Enter();
@@ -17369,7 +17370,7 @@ TEST(Regress528) {
// global object.
const char* source_eval = "function f(){eval('1')}; f()";
{
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
v8::Local<Context> context = Context::New(isolate);
context->Enter();
@@ -17391,7 +17392,7 @@ TEST(Regress528) {
// compilation cache to the global object.
const char* source_exception = "function f(){throw 1;} f()";
{
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
v8::Local<Context> context = Context::New(isolate);
context->Enter();
@@ -20333,7 +20334,7 @@ TEST(SetAutorunMicrotasks) {
env->GetIsolate()->EnqueueMicrotask(
Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
{
- v8::Isolate::SuppressMicrotaskExecutionScope scope(env->GetIsolate());
+ v8::Isolate::SuppressMicrotaskExecutionScope suppress(env->GetIsolate());
CompileRun("1+1;");
CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
@@ -24264,11 +24265,12 @@ TEST(CodeCache) {
v8::Context::Scope cscope(context);
v8::Local<v8::String> source_string = v8_str(source);
v8::ScriptOrigin script_origin(isolate1, v8_str(origin));
- v8::ScriptCompiler::Source source(source_string, script_origin);
+ v8::ScriptCompiler::Source script_source(source_string, script_origin);
v8::ScriptCompiler::CompileOptions option =
v8::ScriptCompiler::kNoCompileOptions;
v8::Local<v8::Script> script =
- v8::ScriptCompiler::Compile(context, &source, option).ToLocalChecked();
+ v8::ScriptCompiler::Compile(context, &script_source, option)
+ .ToLocalChecked();
cache = v8::ScriptCompiler::CreateCodeCache(script->GetUnboundScript());
}
isolate1->Dispose();
@@ -24281,14 +24283,15 @@ TEST(CodeCache) {
v8::Context::Scope cscope(context);
v8::Local<v8::String> source_string = v8_str(source);
v8::ScriptOrigin script_origin(isolate2, v8_str(origin));
- v8::ScriptCompiler::Source source(source_string, script_origin, cache);
+ v8::ScriptCompiler::Source script_source(source_string, script_origin,
+ cache);
v8::ScriptCompiler::CompileOptions option =
v8::ScriptCompiler::kConsumeCodeCache;
v8::Local<v8::Script> script;
{
i::DisallowCompilation no_compile(
reinterpret_cast<i::Isolate*>(isolate2));
- script = v8::ScriptCompiler::Compile(context, &source, option)
+ script = v8::ScriptCompiler::Compile(context, &script_source, option)
.ToLocalChecked();
}
CHECK_EQ(2, script->Run(context)
@@ -24879,11 +24882,11 @@ TEST(CodeCacheScriptModuleMismatch) {
v8::Context::Scope cscope(context);
v8::Local<v8::String> source_string = v8_str(source);
v8::ScriptOrigin script_origin(isolate, v8_str(origin));
- v8::ScriptCompiler::Source source(source_string, script_origin);
+ v8::ScriptCompiler::Source script_source(source_string, script_origin);
v8::ScriptCompiler::CompileOptions option =
v8::ScriptCompiler::kNoCompileOptions;
v8::Local<v8::Script> script =
- v8::ScriptCompiler::Compile(context, &source, option)
+ v8::ScriptCompiler::Compile(context, &script_source, option)
.ToLocalChecked();
cache = v8::ScriptCompiler::CreateCodeCache(script->GetUnboundScript());
}
@@ -25147,7 +25150,7 @@ TEST(SealHandleScopeNested) {
v8::SealHandleScope seal(isolate);
{
- v8::HandleScope handle_scope(isolate);
+ v8::HandleScope inner_handle_scope(isolate);
// Should work
v8::Local<v8::Object> obj = v8::Object::New(isolate);
@@ -26044,7 +26047,7 @@ TEST(CrossActivationEval) {
v8::HandleScope scope(isolate);
{
call_eval_context = v8::Context::New(isolate);
- v8::Context::Scope scope(call_eval_context);
+ v8::Context::Scope context_scope(call_eval_context);
call_eval_bound_function =
Local<Function>::Cast(CompileRun("eval.bind(this, '1')"));
}
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index b811450edf..7052e260dc 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -2532,7 +2532,10 @@ TEST(AssemblerX64Regmove256bit) {
CpuFeatureScope fscope(&masm, AVX);
__ vmovdqa(ymm0, ymm1);
+ __ vmovdqa(ymm4, Operand(rbx, rcx, times_4, 10000));
__ vmovdqu(ymm10, ymm11);
+ __ vmovdqu(ymm9, Operand(rbx, rcx, times_4, 10000));
+ __ vmovdqu(Operand(rbx, rcx, times_4, 10000), ymm0);
CodeDesc desc;
masm.GetCode(isolate, &desc);
@@ -2544,9 +2547,71 @@ TEST(AssemblerX64Regmove256bit) {
#endif
byte expected[] = {// VMOVDQA
+ // vmovdqa ymm0,ymm1
0xC5, 0xFD, 0x6F, 0xC1,
+ // vmovdqa ymm4,YMMWORD PTR [rbx+rcx*4+0x2710]
+ 0xC5, 0xFD, 0x6F, 0xA4, 0x8B, 0x10, 0x27, 0x00, 0x00,
+
// VMOVDQU
- 0xC4, 0x41, 0x7E, 0x7F, 0xDA};
+ // vmovdqu ymm10,ymm11
+ 0xC4, 0x41, 0x7E, 0x7F, 0xDA,
+ // vmovdqu ymm9,YMMWORD PTR [rbx+rcx*4+0x2710]
+ 0xC5, 0x7E, 0x6F, 0x8C, 0x8B, 0x10, 0x27, 0x00, 0x00,
+ // vmovdqu YMMWORD PTR [rbx+rcx*4+0x2710],ymm0
+ 0xC5, 0xFE, 0x7F, 0x84, 0x8B, 0x10, 0x27, 0x00, 0x00};
+ CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
+}
+
+TEST(AssemblerX64LaneOp256bit) {
+ if (!CpuFeatures::IsSupported(AVX2)) return;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ auto buffer = AllocateAssemblerBuffer();
+ Isolate* isolate = CcTest::i_isolate();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
+ CpuFeatureScope fscope(&masm, AVX2);
+
+ __ vpshufd(ymm1, ymm2, 85);
+ __ vpshufd(ymm1, Operand(rbx, rcx, times_4, 10000), 85);
+ __ vpshuflw(ymm9, ymm10, 85);
+ __ vpshuflw(ymm9, Operand(rbx, rcx, times_4, 10000), 85);
+ __ vpshufhw(ymm1, ymm2, 85);
+ __ vpshufhw(ymm1, Operand(rbx, rcx, times_4, 10000), 85);
+ __ vpblendw(ymm2, ymm3, ymm4, 23);
+ __ vpblendw(ymm2, ymm3, Operand(rbx, rcx, times_4, 10000), 23);
+ __ vpalignr(ymm10, ymm11, ymm12, 4);
+ __ vpalignr(ymm10, ymm11, Operand(rbx, rcx, times_4, 10000), 4);
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+#ifdef OBJECT_PRINT
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ StdoutStream os;
+ code->Print(os);
+#endif
+
+ byte expected[] = {
+ // vpshufd ymm1, ymm2, 85
+ 0xC5, 0xFD, 0x70, 0xCA, 0x55,
+ // vpshufd ymm1,YMMWORD PTR [rbx+rcx*4+0x2710], 85
+ 0xC5, 0xFD, 0x70, 0x8C, 0x8B, 0x10, 0x27, 0x00, 0x00, 0x55,
+ // vpshuflw ymm9, ymm10, 85
+ 0xC4, 0x41, 0x7F, 0x70, 0xCA, 0x55,
+ // vpshuflw ymm9,YMMWORD PTR [rbx+rcx*4+0x2710], 85
+ 0xC5, 0x7F, 0x70, 0x8C, 0x8B, 0x10, 0x27, 0x00, 0x00, 0x55,
+ // vpshufhw ymm1, ymm2, 85
+ 0xC5, 0xFE, 0x70, 0xCA, 0x55,
+ // vpshufhw ymm1,YMMWORD PTR [rbx+rcx*4+0x2710], 85
+ 0xC5, 0xFE, 0x70, 0x8C, 0x8B, 0x10, 0x27, 0x00, 0x00, 0x55,
+ // vpblendw ymm2, ymm3, ymm4, 23
+ 0xC4, 0xE3, 0x65, 0x0E, 0xD4, 0x17,
+ // vpblendw ymm2, ymm3, YMMWORD PTR [rbx+rcx*4+0x2710], 23
+ 0xC4, 0xE3, 0x65, 0x0E, 0x94, 0x8B, 0x10, 0x27, 0x00, 0x00, 0x17,
+ // vpalignr ymm10, ymm11, ymm12, 4
+ 0xC4, 0x43, 0x25, 0x0F, 0xD4, 0x04,
+ // vpalignr ymm10, ymm11, YMMWORD PTR [rbx+rcx*4+0x2710], 4
+ 0xC4, 0x63, 0x25, 0x0F, 0x94, 0x8B, 0x10, 0x27, 0x00, 0x00, 0x04};
CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
}
@@ -2562,6 +2627,10 @@ TEST(AssemblerX64FloatingPoint256bit) {
__ vsqrtps(ymm0, ymm1);
__ vunpcklps(ymm2, ymm3, ymm14);
__ vsubps(ymm10, ymm11, ymm12);
+ __ vroundps(ymm9, ymm2, kRoundUp);
+ __ vroundpd(ymm9, ymm2, kRoundToNearest);
+ __ vhaddps(ymm1, ymm2, ymm3);
+ __ vhaddps(ymm0, ymm1, Operand(rbx, rcx, times_4, 10000));
CodeDesc desc;
masm.GetCode(isolate, &desc);
@@ -2577,7 +2646,15 @@ TEST(AssemblerX64FloatingPoint256bit) {
// VUNPCKLPS
0xC4, 0xC1, 0x64, 0x14, 0xD6,
// VSUBPS
- 0xC4, 0x41, 0x24, 0x5C, 0xD4};
+ 0xC4, 0x41, 0x24, 0x5C, 0xD4,
+ // vroundps ymm9, ymm2, 0xA
+ 0xC4, 0x63, 0x7D, 0x08, 0xCA, 0x0A,
+ // vroundpd ymm9, ymm2, 0x8
+ 0xC4, 0x63, 0x7D, 0x09, 0xCA, 0x08,
+ // VHADDPS ymm1, ymm2, ymm3
+ 0xC5, 0xEF, 0x7C, 0xCB,
+ // VHADDPS ymm0, ymm1, YMMWORD PTR [rbx+rcx*4+0x2710]
+ 0xc5, 0xf7, 0x7c, 0x84, 0x8b, 0x10, 0x27, 0x00, 0x00};
CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
}
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index dce6dda1e9..ee83e549d8 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -604,7 +604,7 @@ TEST(TryToName) {
m.Goto(&check_result);
m.BIND(&if_expectedisheapnumber);
- CSA_ASSERT(&m, m.IsHeapNumber(m.CAST(expected_arg)));
+ CSA_DCHECK(&m, m.IsHeapNumber(m.CAST(expected_arg)));
TNode<Float64T> value = m.LoadHeapNumberValue(m.CAST(expected_arg));
// We know this to be safe as all expected values are in intptr
// range.
@@ -3036,12 +3036,15 @@ TEST(NewPromiseCapability) {
handle(JSFunction::cast(result->reject()), isolate)};
for (auto&& callback : callbacks) {
- Handle<Context> context(Context::cast(callback->context()), isolate);
+ Handle<Context> callback_context(Context::cast(callback->context()),
+ isolate);
CHECK_EQ(isolate->root(RootIndex::kEmptyScopeInfo),
- context->scope_info());
- CHECK_EQ(*isolate->native_context(), context->native_context());
- CHECK_EQ(PromiseBuiltins::kPromiseContextLength, context->length());
- CHECK_EQ(context->get(PromiseBuiltins::kPromiseSlot), result->promise());
+ callback_context->scope_info());
+ CHECK_EQ(*isolate->native_context(), callback_context->native_context());
+ CHECK_EQ(PromiseBuiltins::kPromiseContextLength,
+ callback_context->length());
+ CHECK_EQ(callback_context->get(PromiseBuiltins::kPromiseSlot),
+ result->promise());
}
}
diff --git a/deps/v8/test/cctest/test-concurrent-feedback-vector.cc b/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
index 38f7c05ffe..ec6f7079e1 100644
--- a/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-concurrent-feedback-vector.cc
@@ -68,8 +68,8 @@ class FeedbackVectorExplorationThread final : public v8::base::Thread {
if (state == MONOMORPHIC || state == POLYMORPHIC) {
MapHandles maps;
nexus.ExtractMaps(&maps);
- for (unsigned int i = 0; i < maps.size(); i++) {
- CHECK(maps[i]->IsMap());
+ for (unsigned int j = 0; j < maps.size(); j++) {
+ CHECK(maps[j]->IsMap());
}
}
diff --git a/deps/v8/test/cctest/test-concurrent-script-context-table.cc b/deps/v8/test/cctest/test-concurrent-script-context-table.cc
index be1984c673..d185d0538b 100644
--- a/deps/v8/test/cctest/test-concurrent-script-context-table.cc
+++ b/deps/v8/test/cctest/test-concurrent-script-context-table.cc
@@ -184,10 +184,10 @@ TEST(ScriptContextTable_AccessScriptContextTable) {
sema_started.Wait();
for (; initialized_entries < 1000; ++initialized_entries) {
- Handle<Context> context =
+ Handle<Context> new_context =
factory->NewScriptContext(native_context, scope_info);
script_context_table =
- ScriptContextTable::Extend(script_context_table, context);
+ ScriptContextTable::Extend(script_context_table, new_context);
native_context->synchronized_set_script_context_table(
*script_context_table);
// Update with relaxed semantics to not introduce ordering constraints.
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 00ac78f629..e03536fb75 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -4277,7 +4277,7 @@ TEST(BytecodeFlushEventsEagerLogging) {
// This compile will add the code to the compilation cache.
{
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
CompileRun(source);
}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 171c963ea0..f98907fa92 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -3269,15 +3269,16 @@ TEST(DebugScriptLineEndsAreAscending) {
v8::HandleScope scope(isolate);
// Compile a test script.
- v8::Local<v8::String> script = v8_str(isolate,
- "function f() {\n"
- " debugger;\n"
- "}\n");
+ v8::Local<v8::String> script_source = v8_str(isolate,
+ "function f() {\n"
+ " debugger;\n"
+ "}\n");
v8::ScriptOrigin origin1 = v8::ScriptOrigin(isolate, v8_str(isolate, "name"));
- v8::Local<v8::Script> script1 =
- v8::Script::Compile(env.local(), script, &origin1).ToLocalChecked();
- USE(script1);
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(env.local(), script_source, &origin1)
+ .ToLocalChecked();
+ USE(script);
Handle<v8::internal::FixedArray> instances;
{
@@ -3287,12 +3288,12 @@ TEST(DebugScriptLineEndsAreAscending) {
CHECK_GT(instances->length(), 0);
for (int i = 0; i < instances->length(); i++) {
- Handle<v8::internal::Script> script = Handle<v8::internal::Script>(
+ Handle<v8::internal::Script> new_script = Handle<v8::internal::Script>(
v8::internal::Script::cast(instances->get(i)), CcTest::i_isolate());
- v8::internal::Script::InitLineEnds(CcTest::i_isolate(), script);
+ v8::internal::Script::InitLineEnds(CcTest::i_isolate(), new_script);
v8::internal::FixedArray ends =
- v8::internal::FixedArray::cast(script->line_ends());
+ v8::internal::FixedArray::cast(new_script->line_ends());
CHECK_GT(ends.length(), 0);
int prev_end = -1;
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 7acdc0b493..6fe54e02b3 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -493,20 +493,6 @@ TEST(DisasmIa320) {
__ movd(eax, xmm1);
__ movd(Operand(ebx, ecx, times_4, 10000), xmm1);
- __ addsd(xmm1, xmm0);
- __ addsd(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ mulsd(xmm1, xmm0);
- __ mulsd(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ subsd(xmm1, xmm0);
- __ subsd(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ divsd(xmm1, xmm0);
- __ divsd(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ minsd(xmm1, xmm0);
- __ minsd(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ maxsd(xmm1, xmm0);
- __ maxsd(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ sqrtsd(xmm1, xmm0);
- __ sqrtsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ ucomisd(xmm0, xmm1);
__ cmpltsd(xmm0, xmm1);
@@ -569,6 +555,7 @@ TEST(DisasmIa320) {
__ instruction(xmm5, Operand(edx, 4));
SSE2_INSTRUCTION_LIST(EMIT_SSE2_INSTR)
+ SSE2_INSTRUCTION_LIST_SD(EMIT_SSE2_INSTR)
#undef EMIT_SSE2_INSTR
}
@@ -657,21 +644,6 @@ TEST(DisasmIa320) {
{
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(&assm, AVX);
- __ vaddsd(xmm0, xmm1, xmm2);
- __ vaddsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
- __ vmulsd(xmm0, xmm1, xmm2);
- __ vmulsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
- __ vsubsd(xmm0, xmm1, xmm2);
- __ vsubsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
- __ vdivsd(xmm0, xmm1, xmm2);
- __ vdivsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
- __ vminsd(xmm0, xmm1, xmm2);
- __ vminsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
- __ vmaxsd(xmm0, xmm1, xmm2);
- __ vmaxsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
- __ vsqrtsd(xmm0, xmm1, xmm2);
- __ vsqrtsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
-
__ vaddss(xmm0, xmm1, xmm2);
__ vaddss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmulss(xmm0, xmm1, xmm2);
@@ -686,6 +658,10 @@ TEST(DisasmIa320) {
__ vmaxss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vsqrtss(xmm0, xmm1, xmm2);
__ vsqrtss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vucomisd(xmm0, xmm1);
+ __ vucomisd(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ vucomiss(xmm0, xmm1);
+ __ vucomiss(xmm0, Operand(ebx, ecx, times_4, 10000));
__ vandps(xmm0, xmm1, xmm2);
__ vandps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
@@ -822,6 +798,15 @@ TEST(DisasmIa320) {
__ vcvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vcvttpd2dq(xmm1, xmm0);
+ __ vcvtsd2ss(xmm2, xmm3, Operand(ebx, ecx, times_4, 10000));
+ __ vcvtsd2ss(xmm2, xmm3, xmm6);
+ __ vcvtss2sd(xmm2, xmm3, Operand(ebx, ecx, times_1, 10000));
+ __ vcvtss2sd(xmm2, xmm3, xmm6);
+ __ vcvttsd2si(eax, Operand(ebx, ecx, times_4, 10000));
+ __ vcvttsd2si(ebx, xmm6);
+ __ vcvttss2si(eax, Operand(ebx, ecx, times_4, 10000));
+ __ vcvttss2si(ebx, xmm6);
+
__ vmovddup(xmm1, xmm2);
__ vmovddup(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmovshdup(xmm1, xmm2);
@@ -841,11 +826,15 @@ TEST(DisasmIa320) {
__ vpcmpgtq(xmm0, xmm1, xmm2);
+ __ vroundsd(xmm0, xmm3, xmm2, kRoundDown);
+ __ vroundss(xmm0, xmm3, xmm2, kRoundDown);
+
#define EMIT_SSE2_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3) \
__ v##instruction(xmm7, xmm5, xmm1); \
__ v##instruction(xmm7, xmm5, Operand(edx, 4));
SSE2_INSTRUCTION_LIST(EMIT_SSE2_AVXINSTR)
+ SSE2_INSTRUCTION_LIST_SD(EMIT_SSE2_AVXINSTR)
#undef EMIT_SSE2_AVXINSTR
#define EMIT_SSE34_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3, \
diff --git a/deps/v8/test/cctest/test-disasm-loong64.cc b/deps/v8/test/cctest/test-disasm-loong64.cc
index 51549e76d1..5620eb9c69 100644
--- a/deps/v8/test/cctest/test-disasm-loong64.cc
+++ b/deps/v8/test/cctest/test-disasm-loong64.cc
@@ -110,9 +110,9 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
TEST(TypeOp6) {
SET_UP();
- COMPARE(jirl(ra, t7, 0), "4c000261 jirl ra, t7, 0");
- COMPARE(jirl(ra, t7, 32767), "4dfffe61 jirl ra, t7, 32767");
- COMPARE(jirl(ra, t7, -32768), "4e000261 jirl ra, t7, -32768");
+ COMPARE(jirl(ra, t7, 0), "4c000261 jirl ra, t7, 0x0");
+ COMPARE(jirl(ra, t7, 32767), "4dfffe61 jirl ra, t7, 0x1fffc");
+ COMPARE(jirl(ra, t7, -32768), "4e000261 jirl ra, t7, 0x20000");
VERIFY_RUN();
}
@@ -120,69 +120,71 @@ TEST(TypeOp6) {
TEST(TypeOp6PC) {
SET_UP();
- COMPARE_PC_REL(beqz(t7, 1048575), "43fffe6f beqz t7, 1048575",
+ COMPARE_PC_REL(beqz(t7, 1048575), "43fffe6f beqz t7, 0x3ffffc",
1048575);
- COMPARE_PC_REL(beqz(t0, -1048576), "40000190 beqz t0, -1048576",
+ COMPARE_PC_REL(beqz(t0, -1048576), "40000190 beqz t0, 0x400000",
-1048576);
- COMPARE_PC_REL(beqz(t1, 0), "400001a0 beqz t1, 0", 0);
+ COMPARE_PC_REL(beqz(t1, 0), "400001a0 beqz t1, 0x0", 0);
- COMPARE_PC_REL(bnez(a2, 1048575), "47fffccf bnez a2, 1048575",
+ COMPARE_PC_REL(bnez(a2, 1048575), "47fffccf bnez a2, 0x3ffffc",
1048575);
- COMPARE_PC_REL(bnez(s3, -1048576), "44000350 bnez s3, -1048576",
+ COMPARE_PC_REL(bnez(s3, -1048576), "44000350 bnez s3, 0x400000",
-1048576);
- COMPARE_PC_REL(bnez(t8, 0), "44000280 bnez t8, 0", 0);
+ COMPARE_PC_REL(bnez(t8, 0), "44000280 bnez t8, 0x0", 0);
- COMPARE_PC_REL(bceqz(FCC0, 1048575), "4bfffc0f bceqz fcc0, 1048575",
- 1048575);
+ COMPARE_PC_REL(bceqz(FCC0, 1048575),
+ "4bfffc0f bceqz fcc0, 0x3ffffc", 1048575);
COMPARE_PC_REL(bceqz(FCC0, -1048576),
- "48000010 bceqz fcc0, -1048576", -1048576);
- COMPARE_PC_REL(bceqz(FCC0, 0), "48000000 bceqz fcc0, 0", 0);
+ "48000010 bceqz fcc0, 0x400000", -1048576);
+ COMPARE_PC_REL(bceqz(FCC0, 0), "48000000 bceqz fcc0, 0x0", 0);
- COMPARE_PC_REL(bcnez(FCC0, 1048575), "4bfffd0f bcnez fcc0, 1048575",
- 1048575);
+ COMPARE_PC_REL(bcnez(FCC0, 1048575),
+ "4bfffd0f bcnez fcc0, 0x3ffffc", 1048575);
COMPARE_PC_REL(bcnez(FCC0, -1048576),
- "48000110 bcnez fcc0, -1048576", -1048576);
- COMPARE_PC_REL(bcnez(FCC0, 0), "48000100 bcnez fcc0, 0", 0);
-
- COMPARE_PC_REL(b(33554431), "53fffdff b 33554431", 33554431);
- COMPARE_PC_REL(b(-33554432), "50000200 b -33554432", -33554432);
- COMPARE_PC_REL(b(0), "50000000 b 0", 0);
-
- COMPARE_PC_REL(beq(t0, a6, 32767), "59fffd8a beq t0, a6, 32767",
- 32767);
- COMPARE_PC_REL(beq(t1, a0, -32768), "5a0001a4 beq t1, a0, -32768",
- -32768);
- COMPARE_PC_REL(beq(a4, t1, 0), "5800010d beq a4, t1, 0", 0);
-
- COMPARE_PC_REL(bne(a3, a4, 32767), "5dfffce8 bne a3, a4, 32767",
- 32767);
- COMPARE_PC_REL(bne(a6, a5, -32768), "5e000149 bne a6, a5, -32768",
- -32768);
- COMPARE_PC_REL(bne(a4, a5, 0), "5c000109 bne a4, a5, 0", 0);
-
- COMPARE_PC_REL(blt(a4, a6, 32767), "61fffd0a blt a4, a6, 32767",
- 32767);
- COMPARE_PC_REL(blt(a4, a5, -32768), "62000109 blt a4, a5, -32768",
- -32768);
- COMPARE_PC_REL(blt(a4, a6, 0), "6000010a blt a4, a6, 0", 0);
-
- COMPARE_PC_REL(bge(s7, a5, 32767), "65ffffc9 bge s7, a5, 32767",
- 32767);
- COMPARE_PC_REL(bge(a1, a3, -32768), "660000a7 bge a1, a3, -32768",
- -32768);
- COMPARE_PC_REL(bge(a5, s3, 0), "6400013a bge a5, s3, 0", 0);
-
- COMPARE_PC_REL(bltu(a5, s7, 32767), "69fffd3e bltu a5, s7, 32767",
- 32767);
- COMPARE_PC_REL(bltu(a4, a5, -32768), "6a000109 bltu a4, a5, -32768",
- -32768);
- COMPARE_PC_REL(bltu(a4, t6, 0), "68000112 bltu a4, t6, 0", 0);
-
- COMPARE_PC_REL(bgeu(a7, a6, 32767), "6dfffd6a bgeu a7, a6, 32767",
- 32767);
- COMPARE_PC_REL(bgeu(a5, a3, -32768), "6e000127 bgeu a5, a3, -32768",
- -32768);
- COMPARE_PC_REL(bgeu(t2, t1, 0), "6c0001cd bgeu t2, t1, 0", 0);
+ "48000110 bcnez fcc0, 0x400000", -1048576);
+ COMPARE_PC_REL(bcnez(FCC0, 0), "48000100 bcnez fcc0, 0x0", 0);
+
+ COMPARE_PC_REL(b(33554431), "53fffdff b 0x7fffffc",
+ 33554431);
+ COMPARE_PC_REL(b(-33554432), "50000200 b 0x8000000",
+ -33554432);
+ COMPARE_PC_REL(b(0), "50000000 b 0x0", 0);
+
+ COMPARE_PC_REL(beq(t0, a6, 32767),
+ "59fffd8a beq t0, a6, 0x1fffc", 32767);
+ COMPARE_PC_REL(beq(t1, a0, -32768),
+ "5a0001a4 beq t1, a0, 0x20000", -32768);
+ COMPARE_PC_REL(beq(a4, t1, 0), "5800010d beq a4, t1, 0x0", 0);
+
+ COMPARE_PC_REL(bne(a3, a4, 32767),
+ "5dfffce8 bne a3, a4, 0x1fffc", 32767);
+ COMPARE_PC_REL(bne(a6, a5, -32768),
+ "5e000149 bne a6, a5, 0x20000", -32768);
+ COMPARE_PC_REL(bne(a4, a5, 0), "5c000109 bne a4, a5, 0x0", 0);
+
+ COMPARE_PC_REL(blt(a4, a6, 32767),
+ "61fffd0a blt a4, a6, 0x1fffc", 32767);
+ COMPARE_PC_REL(blt(a4, a5, -32768),
+ "62000109 blt a4, a5, 0x20000", -32768);
+ COMPARE_PC_REL(blt(a4, a6, 0), "6000010a blt a4, a6, 0x0", 0);
+
+ COMPARE_PC_REL(bge(s7, a5, 32767),
+ "65ffffc9 bge s7, a5, 0x1fffc", 32767);
+ COMPARE_PC_REL(bge(a1, a3, -32768),
+ "660000a7 bge a1, a3, 0x20000", -32768);
+ COMPARE_PC_REL(bge(a5, s3, 0), "6400013a bge a5, s3, 0x0", 0);
+
+ COMPARE_PC_REL(bltu(a5, s7, 32767),
+ "69fffd3e bltu a5, s7, 0x1fffc", 32767);
+ COMPARE_PC_REL(bltu(a4, a5, -32768),
+ "6a000109 bltu a4, a5, 0x20000", -32768);
+ COMPARE_PC_REL(bltu(a4, t6, 0), "68000112 bltu a4, t6, 0x0", 0);
+
+ COMPARE_PC_REL(bgeu(a7, a6, 32767),
+ "6dfffd6a bgeu a7, a6, 0x1fffc", 32767);
+ COMPARE_PC_REL(bgeu(a5, a3, -32768),
+ "6e000127 bgeu a5, a3, 0x20000", -32768);
+ COMPARE_PC_REL(bgeu(t2, t1, 0), "6c0001cd bgeu t2, t1, 0x0", 0);
VERIFY_RUN();
}
@@ -190,30 +192,30 @@ TEST(TypeOp6PC) {
TEST(TypeOp7) {
SET_UP();
- COMPARE(lu12i_w(a4, 524287), "14ffffe8 lu12i.w a4, 524287");
- COMPARE(lu12i_w(a5, -524288), "15000009 lu12i.w a5, -524288");
- COMPARE(lu12i_w(a6, 0), "1400000a lu12i.w a6, 0");
+ COMPARE(lu12i_w(a4, 524287), "14ffffe8 lu12i.w a4, 0x7ffff");
+ COMPARE(lu12i_w(a5, -524288), "15000009 lu12i.w a5, 0x80000");
+ COMPARE(lu12i_w(a6, 0), "1400000a lu12i.w a6, 0x0");
- COMPARE(lu32i_d(a7, 524287), "16ffffeb lu32i.d a7, 524287");
- COMPARE(lu32i_d(t0, 524288), "1700000c lu32i.d t0, -524288");
- COMPARE(lu32i_d(t1, 0), "1600000d lu32i.d t1, 0");
+ COMPARE(lu32i_d(a7, 524287), "16ffffeb lu32i.d a7, 0x7ffff");
+ COMPARE(lu32i_d(t0, -524288), "1700000c lu32i.d t0, 0x80000");
+ COMPARE(lu32i_d(t1, 0), "1600000d lu32i.d t1, 0x0");
- COMPARE(pcaddi(t1, 1), "1800002d pcaddi t1, 1");
- COMPARE(pcaddi(t2, 524287), "18ffffee pcaddi t2, 524287");
- COMPARE(pcaddi(t3, -524288), "1900000f pcaddi t3, -524288");
- COMPARE(pcaddi(t4, 0), "18000010 pcaddi t4, 0");
+ COMPARE(pcaddi(t1, 1), "1800002d pcaddi t1, 0x1");
+ COMPARE(pcaddi(t2, 524287), "18ffffee pcaddi t2, 0x7ffff");
+ COMPARE(pcaddi(t3, -524288), "1900000f pcaddi t3, 0x80000");
+ COMPARE(pcaddi(t4, 0), "18000010 pcaddi t4, 0x0");
- COMPARE(pcalau12i(t5, 524287), "1afffff1 pcalau12i t5, 524287");
- COMPARE(pcalau12i(t6, -524288), "1b000012 pcalau12i t6, -524288");
- COMPARE(pcalau12i(a4, 0), "1a000008 pcalau12i a4, 0");
+ COMPARE(pcalau12i(t5, 524287), "1afffff1 pcalau12i t5, 0x7ffff");
+ COMPARE(pcalau12i(t6, -524288), "1b000012 pcalau12i t6, 0x80000");
+ COMPARE(pcalau12i(a4, 0), "1a000008 pcalau12i a4, 0x0");
- COMPARE(pcaddu12i(a5, 524287), "1cffffe9 pcaddu12i a5, 524287");
- COMPARE(pcaddu12i(a6, -524288), "1d00000a pcaddu12i a6, -524288");
- COMPARE(pcaddu12i(a7, 0), "1c00000b pcaddu12i a7, 0");
+ COMPARE(pcaddu12i(a5, 524287), "1cffffe9 pcaddu12i a5, 0x7ffff");
+ COMPARE(pcaddu12i(a6, -524288), "1d00000a pcaddu12i a6, 0x80000");
+ COMPARE(pcaddu12i(a7, 0), "1c00000b pcaddu12i a7, 0x0");
- COMPARE(pcaddu18i(t0, 524287), "1effffec pcaddu18i t0, 524287");
- COMPARE(pcaddu18i(t1, -524288), "1f00000d pcaddu18i t1, -524288");
- COMPARE(pcaddu18i(t2, 0), "1e00000e pcaddu18i t2, 0");
+ COMPARE(pcaddu18i(t0, 524287), "1effffec pcaddu18i t0, 0x7ffff");
+ COMPARE(pcaddu18i(t1, -524288), "1f00000d pcaddu18i t1, 0x80000");
+ COMPARE(pcaddu18i(t2, 0), "1e00000e pcaddu18i t2, 0x0");
VERIFY_RUN();
}
@@ -221,37 +223,53 @@ TEST(TypeOp7) {
TEST(TypeOp8) {
SET_UP();
- COMPARE(ll_w(t2, t3, 32764), "207ffdee ll.w t2, t3, 32764");
- COMPARE(ll_w(t3, t4, -32768), "2080020f ll.w t3, t4, -32768");
- COMPARE(ll_w(t5, t6, 0), "20000251 ll.w t5, t6, 0");
-
- COMPARE(sc_w(a6, a7, 32764), "217ffd6a sc.w a6, a7, 32764");
- COMPARE(sc_w(t0, t1, -32768), "218001ac sc.w t0, t1, -32768");
- COMPARE(sc_w(t2, t3, 0), "210001ee sc.w t2, t3, 0");
-
- COMPARE(ll_d(a0, a1, 32764), "227ffca4 ll.d a0, a1, 32764");
- COMPARE(ll_d(a2, a3, -32768), "228000e6 ll.d a2, a3, -32768");
- COMPARE(ll_d(a4, a5, 0), "22000128 ll.d a4, a5, 0");
-
- COMPARE(sc_d(t4, t5, 32764), "237ffe30 sc.d t4, t5, 32764");
- COMPARE(sc_d(t6, a0, -32768), "23800092 sc.d t6, a0, -32768");
- COMPARE(sc_d(a1, a2, 0), "230000c5 sc.d a1, a2, 0");
-
- COMPARE(ldptr_w(a4, a5, 32764), "247ffd28 ldptr.w a4, a5, 32764");
- COMPARE(ldptr_w(a6, a7, -32768), "2480016a ldptr.w a6, a7, -32768");
- COMPARE(ldptr_w(t0, t1, 0), "240001ac ldptr.w t0, t1, 0");
-
- COMPARE(stptr_w(a4, a5, 32764), "257ffd28 stptr.w a4, a5, 32764");
- COMPARE(stptr_w(a6, a7, -32768), "2580016a stptr.w a6, a7, -32768");
- COMPARE(stptr_w(t0, t1, 0), "250001ac stptr.w t0, t1, 0");
-
- COMPARE(ldptr_d(t2, t3, 32764), "267ffdee ldptr.d t2, t3, 32764");
- COMPARE(ldptr_d(t4, t5, -32768), "26800230 ldptr.d t4, t5, -32768");
- COMPARE(ldptr_d(t6, a4, 0), "26000112 ldptr.d t6, a4, 0");
-
- COMPARE(stptr_d(a5, a6, 32764), "277ffd49 stptr.d a5, a6, 32764");
- COMPARE(stptr_d(a7, t0, -32768), "2780018b stptr.d a7, t0, -32768");
- COMPARE(stptr_d(t1, t2, 0), "270001cd stptr.d t1, t2, 0");
+ COMPARE(ll_w(t2, t3, 32764),
+ "207ffdee ll.w t2, t3, 32764(0x7ffc)");
+ COMPARE(ll_w(t3, t4, -32768),
+ "2080020f ll.w t3, t4, -32768(0x8000)");
+ COMPARE(ll_w(t5, t6, 0), "20000251 ll.w t5, t6, 0(0x0)");
+
+ COMPARE(sc_w(a6, a7, 32764),
+ "217ffd6a sc.w a6, a7, 32764(0x7ffc)");
+ COMPARE(sc_w(t0, t1, -32768),
+ "218001ac sc.w t0, t1, -32768(0x8000)");
+ COMPARE(sc_w(t2, t3, 0), "210001ee sc.w t2, t3, 0(0x0)");
+
+ COMPARE(ll_d(a0, a1, 32764),
+ "227ffca4 ll.d a0, a1, 32764(0x7ffc)");
+ COMPARE(ll_d(a2, a3, -32768),
+ "228000e6 ll.d a2, a3, -32768(0x8000)");
+ COMPARE(ll_d(a4, a5, 0), "22000128 ll.d a4, a5, 0(0x0)");
+
+ COMPARE(sc_d(t4, t5, 32764),
+ "237ffe30 sc.d t4, t5, 32764(0x7ffc)");
+ COMPARE(sc_d(t6, a0, -32768),
+ "23800092 sc.d t6, a0, -32768(0x8000)");
+ COMPARE(sc_d(a1, a2, 0), "230000c5 sc.d a1, a2, 0(0x0)");
+
+ COMPARE(ldptr_w(a4, a5, 32764),
+ "247ffd28 ldptr.w a4, a5, 32764(0x7ffc)");
+ COMPARE(ldptr_w(a6, a7, -32768),
+ "2480016a ldptr.w a6, a7, -32768(0x8000)");
+ COMPARE(ldptr_w(t0, t1, 0), "240001ac ldptr.w t0, t1, 0(0x0)");
+
+ COMPARE(stptr_w(a4, a5, 32764),
+ "257ffd28 stptr.w a4, a5, 32764(0x7ffc)");
+ COMPARE(stptr_w(a6, a7, -32768),
+ "2580016a stptr.w a6, a7, -32768(0x8000)");
+ COMPARE(stptr_w(t0, t1, 0), "250001ac stptr.w t0, t1, 0(0x0)");
+
+ COMPARE(ldptr_d(t2, t3, 32764),
+ "267ffdee ldptr.d t2, t3, 32764(0x7ffc)");
+ COMPARE(ldptr_d(t4, t5, -32768),
+ "26800230 ldptr.d t4, t5, -32768(0x8000)");
+ COMPARE(ldptr_d(t6, a4, 0), "26000112 ldptr.d t6, a4, 0(0x0)");
+
+ COMPARE(stptr_d(a5, a6, 32764),
+ "277ffd49 stptr.d a5, a6, 32764(0x7ffc)");
+ COMPARE(stptr_d(a7, t0, -32768),
+ "2780018b stptr.d a7, t0, -32768(0x8000)");
+ COMPARE(stptr_d(t1, t2, 0), "270001cd stptr.d t1, t2, 0(0x0)");
VERIFY_RUN();
}
@@ -260,94 +278,131 @@ TEST(TypeOp10) {
SET_UP();
COMPARE(bstrins_w(a4, a5, 31, 16),
- "007f4128 bstrins.w a4, a5, 31, 16");
- COMPARE(bstrins_w(a6, a7, 5, 0), "0065016a bstrins.w a6, a7, 5, 0");
+ "007f4128 bstrins.w a4, a5, 31, 16");
+ COMPARE(bstrins_w(a6, a7, 5, 0), "0065016a bstrins.w a6, a7, 5, 0");
COMPARE(bstrins_d(a3, zero_reg, 17, 0),
- "00910007 bstrins.d a3, zero_reg, 17, 0");
+ "00910007 bstrins.d a3, zero_reg, 17, 0");
COMPARE(bstrins_d(t1, zero_reg, 17, 0),
- "0091000d bstrins.d t1, zero_reg, 17, 0");
+ "0091000d bstrins.d t1, zero_reg, 17, 0");
COMPARE(bstrpick_w(t0, t1, 31, 29),
- "007ff5ac bstrpick.w t0, t1, 31, 29");
+ "007ff5ac bstrpick.w t0, t1, 31, 29");
COMPARE(bstrpick_w(a4, a5, 16, 0),
- "00708128 bstrpick.w a4, a5, 16, 0");
+ "00708128 bstrpick.w a4, a5, 16, 0");
COMPARE(bstrpick_d(a5, a5, 31, 0),
- "00df0129 bstrpick.d a5, a5, 31, 0");
+ "00df0129 bstrpick.d a5, a5, 31, 0");
COMPARE(bstrpick_d(a4, a4, 25, 2),
- "00d90908 bstrpick.d a4, a4, 25, 2");
+ "00d90908 bstrpick.d a4, a4, 25, 2");
- COMPARE(slti(t2, a5, 2047), "021ffd2e slti t2, a5, 2047");
- COMPARE(slti(a7, a1, -2048), "022000ab slti a7, a1, -2048");
+ COMPARE(slti(t2, a5, 2047),
+ "021ffd2e slti t2, a5, 2047(0x7ff)");
+ COMPARE(slti(a7, a1, -2048),
+ "022000ab slti a7, a1, -2048(0x800)");
- COMPARE(sltui(a7, a7, 2047), "025ffd6b sltui a7, a7, 2047");
- COMPARE(sltui(t1, t1, -2048), "026001ad sltui t1, t1, -2048");
+ COMPARE(sltui(a7, a7, 2047),
+ "025ffd6b sltui a7, a7, 2047(0x7ff)");
+ COMPARE(sltui(t1, t1, -2048),
+ "026001ad sltui t1, t1, -2048(0x800)");
- COMPARE(addi_w(t0, t2, 2047), "029ffdcc addi.w t0, t2, 2047");
- COMPARE(addi_w(a0, a0, -2048), "02a00084 addi.w a0, a0, -2048");
+ COMPARE(addi_w(t0, t2, 2047),
+ "029ffdcc addi.w t0, t2, 2047(0x7ff)");
+ COMPARE(addi_w(a0, a0, -2048),
+ "02a00084 addi.w a0, a0, -2048(0x800)");
COMPARE(addi_d(a0, zero_reg, 2047),
- "02dffc04 addi.d a0, zero_reg, 2047");
- COMPARE(addi_d(t7, t7, -2048), "02e00273 addi.d t7, t7, -2048");
-
- COMPARE(lu52i_d(a0, a0, 2047), "031ffc84 lu52i.d a0, a0, 2047");
- COMPARE(lu52i_d(a1, a1, -2048), "032000a5 lu52i.d a1, a1, -2048");
-
- COMPARE(andi(s3, a3, 0xfff), "037ffcfa andi s3, a3, 0xfff");
- COMPARE(andi(a4, a4, 0), "03400108 andi a4, a4, 0x0");
-
- COMPARE(ori(t6, t6, 0xfff), "03bffe52 ori t6, t6, 0xfff");
- COMPARE(ori(t6, t6, 0), "03800252 ori t6, t6, 0x0");
-
- COMPARE(xori(t1, t1, 0xfff), "03fffdad xori t1, t1, 0xfff");
- COMPARE(xori(a3, a3, 0x0), "03c000e7 xori a3, a3, 0x0");
-
- COMPARE(ld_b(a1, a1, 2047), "281ffca5 ld.b a1, a1, 2047");
- COMPARE(ld_b(a4, a4, -2048), "28200108 ld.b a4, a4, -2048");
-
- COMPARE(ld_h(a4, a0, 2047), "285ffc88 ld.h a4, a0, 2047");
- COMPARE(ld_h(a4, a3, -2048), "286000e8 ld.h a4, a3, -2048");
-
- COMPARE(ld_w(a6, a6, 2047), "289ffd4a ld.w a6, a6, 2047");
- COMPARE(ld_w(a5, a4, -2048), "28a00109 ld.w a5, a4, -2048");
-
- COMPARE(ld_d(a0, a3, 2047), "28dffce4 ld.d a0, a3, 2047");
- COMPARE(ld_d(a6, fp, -2048), "28e002ca ld.d a6, fp, -2048");
- COMPARE(ld_d(a0, a6, 0), "28c00144 ld.d a0, a6, 0");
-
- COMPARE(st_b(a4, a0, 2047), "291ffc88 st.b a4, a0, 2047");
- COMPARE(st_b(a6, a5, -2048), "2920012a st.b a6, a5, -2048");
-
- COMPARE(st_h(a4, a0, 2047), "295ffc88 st.h a4, a0, 2047");
- COMPARE(st_h(t1, t2, -2048), "296001cd st.h t1, t2, -2048");
-
- COMPARE(st_w(t3, a4, 2047), "299ffd0f st.w t3, a4, 2047");
- COMPARE(st_w(a3, t2, -2048), "29a001c7 st.w a3, t2, -2048");
-
- COMPARE(st_d(s3, sp, 2047), "29dffc7a st.d s3, sp, 2047");
- COMPARE(st_d(fp, s6, -2048), "29e003b6 st.d fp, s6, -2048");
-
- COMPARE(ld_bu(a6, a0, 2047), "2a1ffc8a ld.bu a6, a0, 2047");
- COMPARE(ld_bu(a7, a7, -2048), "2a20016b ld.bu a7, a7, -2048");
-
- COMPARE(ld_hu(a7, a7, 2047), "2a5ffd6b ld.hu a7, a7, 2047");
- COMPARE(ld_hu(a3, a3, -2048), "2a6000e7 ld.hu a3, a3, -2048");
-
- COMPARE(ld_wu(a3, a0, 2047), "2a9ffc87 ld.wu a3, a0, 2047");
- COMPARE(ld_wu(a3, a5, -2048), "2aa00127 ld.wu a3, a5, -2048");
-
- COMPARE(fld_s(f0, a3, 2047), "2b1ffce0 fld.s f0, a3, 2047");
- COMPARE(fld_s(f0, a1, -2048), "2b2000a0 fld.s f0, a1, -2048");
-
- COMPARE(fld_d(f0, a0, 2047), "2b9ffc80 fld.d f0, a0, 2047");
- COMPARE(fld_d(f0, fp, -2048), "2ba002c0 fld.d f0, fp, -2048");
-
- COMPARE(fst_d(f0, fp, 2047), "2bdffec0 fst.d f0, fp, 2047");
- COMPARE(fst_d(f0, a0, -2048), "2be00080 fst.d f0, a0, -2048");
-
- COMPARE(fst_s(f0, a5, 2047), "2b5ffd20 fst.s f0, a5, 2047");
- COMPARE(fst_s(f0, a3, -2048), "2b6000e0 fst.s f0, a3, -2048");
+ "02dffc04 addi.d a0, zero_reg, 2047(0x7ff)");
+ COMPARE(addi_d(t7, t7, -2048),
+ "02e00273 addi.d t7, t7, -2048(0x800)");
+
+ COMPARE(lu52i_d(a0, a0, 2047), "031ffc84 lu52i.d a0, a0, 0x7ff");
+ COMPARE(lu52i_d(a1, a1, -2048), "032000a5 lu52i.d a1, a1, 0x800");
+
+ COMPARE(andi(s3, a3, 0xfff), "037ffcfa andi s3, a3, 0xfff");
+ COMPARE(andi(a4, a4, 0), "03400108 andi a4, a4, 0x0");
+
+ COMPARE(ori(t6, t6, 0xfff), "03bffe52 ori t6, t6, 0xfff");
+ COMPARE(ori(t6, t6, 0), "03800252 ori t6, t6, 0x0");
+
+ COMPARE(xori(t1, t1, 0xfff), "03fffdad xori t1, t1, 0xfff");
+ COMPARE(xori(a3, a3, 0x0), "03c000e7 xori a3, a3, 0x0");
+
+ COMPARE(ld_b(a1, a1, 2047),
+ "281ffca5 ld.b a1, a1, 2047(0x7ff)");
+ COMPARE(ld_b(a4, a4, -2048),
+ "28200108 ld.b a4, a4, -2048(0x800)");
+
+ COMPARE(ld_h(a4, a0, 2047),
+ "285ffc88 ld.h a4, a0, 2047(0x7ff)");
+ COMPARE(ld_h(a4, a3, -2048),
+ "286000e8 ld.h a4, a3, -2048(0x800)");
+
+ COMPARE(ld_w(a6, a6, 2047),
+ "289ffd4a ld.w a6, a6, 2047(0x7ff)");
+ COMPARE(ld_w(a5, a4, -2048),
+ "28a00109 ld.w a5, a4, -2048(0x800)");
+
+ COMPARE(ld_d(a0, a3, 2047),
+ "28dffce4 ld.d a0, a3, 2047(0x7ff)");
+ COMPARE(ld_d(a6, fp, -2048),
+ "28e002ca ld.d a6, fp, -2048(0x800)");
+ COMPARE(ld_d(a0, a6, 0), "28c00144 ld.d a0, a6, 0(0x0)");
+
+ COMPARE(st_b(a4, a0, 2047),
+ "291ffc88 st.b a4, a0, 2047(0x7ff)");
+ COMPARE(st_b(a6, a5, -2048),
+ "2920012a st.b a6, a5, -2048(0x800)");
+
+ COMPARE(st_h(a4, a0, 2047),
+ "295ffc88 st.h a4, a0, 2047(0x7ff)");
+ COMPARE(st_h(t1, t2, -2048),
+ "296001cd st.h t1, t2, -2048(0x800)");
+
+ COMPARE(st_w(t3, a4, 2047),
+ "299ffd0f st.w t3, a4, 2047(0x7ff)");
+ COMPARE(st_w(a3, t2, -2048),
+ "29a001c7 st.w a3, t2, -2048(0x800)");
+
+ COMPARE(st_d(s3, sp, 2047),
+ "29dffc7a st.d s3, sp, 2047(0x7ff)");
+ COMPARE(st_d(fp, s6, -2048),
+ "29e003b6 st.d fp, s6, -2048(0x800)");
+
+ COMPARE(ld_bu(a6, a0, 2047),
+ "2a1ffc8a ld.bu a6, a0, 2047(0x7ff)");
+ COMPARE(ld_bu(a7, a7, -2048),
+ "2a20016b ld.bu a7, a7, -2048(0x800)");
+
+ COMPARE(ld_hu(a7, a7, 2047),
+ "2a5ffd6b ld.hu a7, a7, 2047(0x7ff)");
+ COMPARE(ld_hu(a3, a3, -2048),
+ "2a6000e7 ld.hu a3, a3, -2048(0x800)");
+
+ COMPARE(ld_wu(a3, a0, 2047),
+ "2a9ffc87 ld.wu a3, a0, 2047(0x7ff)");
+ COMPARE(ld_wu(a3, a5, -2048),
+ "2aa00127 ld.wu a3, a5, -2048(0x800)");
+
+ COMPARE(fld_s(f0, a3, 2047),
+ "2b1ffce0 fld.s f0, a3, 2047(0x7ff)");
+ COMPARE(fld_s(f0, a1, -2048),
+ "2b2000a0 fld.s f0, a1, -2048(0x800)");
+
+ COMPARE(fld_d(f0, a0, 2047),
+ "2b9ffc80 fld.d f0, a0, 2047(0x7ff)");
+ COMPARE(fld_d(f0, fp, -2048),
+ "2ba002c0 fld.d f0, fp, -2048(0x800)");
+
+ COMPARE(fst_d(f0, fp, 2047),
+ "2bdffec0 fst.d f0, fp, 2047(0x7ff)");
+ COMPARE(fst_d(f0, a0, -2048),
+ "2be00080 fst.d f0, a0, -2048(0x800)");
+
+ COMPARE(fst_s(f0, a5, 2047),
+ "2b5ffd20 fst.s f0, a5, 2047(0x7ff)");
+ COMPARE(fst_s(f0, a3, -2048),
+ "2b6000e0 fst.s f0, a3, -2048(0x800)");
VERIFY_RUN();
}
@@ -355,21 +410,25 @@ TEST(TypeOp10) {
TEST(TypeOp12) {
SET_UP();
- COMPARE(fmadd_s(f0, f1, f2, f3), "08118820 fmadd.s f0, f1, f2, f3");
- COMPARE(fmadd_s(f4, f5, f6, f7), "081398a4 fmadd.s f4, f5, f6, f7");
+ COMPARE(fmadd_s(f0, f1, f2, f3),
+ "08118820 fmadd.s f0, f1, f2, f3");
+ COMPARE(fmadd_s(f4, f5, f6, f7),
+ "081398a4 fmadd.s f4, f5, f6, f7");
COMPARE(fmadd_d(f8, f9, f10, f11),
- "0825a928 fmadd.d f8, f9, f10, f11");
+ "0825a928 fmadd.d f8, f9, f10, f11");
COMPARE(fmadd_d(f12, f13, f14, f15),
- "0827b9ac fmadd.d f12, f13, f14, f15");
+ "0827b9ac fmadd.d f12, f13, f14, f15");
- COMPARE(fmsub_s(f0, f1, f2, f3), "08518820 fmsub.s f0, f1, f2, f3");
- COMPARE(fmsub_s(f4, f5, f6, f7), "085398a4 fmsub.s f4, f5, f6, f7");
+ COMPARE(fmsub_s(f0, f1, f2, f3),
+ "08518820 fmsub.s f0, f1, f2, f3");
+ COMPARE(fmsub_s(f4, f5, f6, f7),
+ "085398a4 fmsub.s f4, f5, f6, f7");
COMPARE(fmsub_d(f8, f9, f10, f11),
- "0865a928 fmsub.d f8, f9, f10, f11");
+ "0865a928 fmsub.d f8, f9, f10, f11");
COMPARE(fmsub_d(f12, f13, f14, f15),
- "0867b9ac fmsub.d f12, f13, f14, f15");
+ "0867b9ac fmsub.d f12, f13, f14, f15");
COMPARE(fnmadd_s(f0, f1, f2, f3),
"08918820 fnmadd.s f0, f1, f2, f3");
@@ -392,102 +451,102 @@ TEST(TypeOp12) {
"08e7b9ac fnmsub.d f12, f13, f14, f15");
COMPARE(fcmp_cond_s(CAF, f1, f2, FCC0),
- "0c100820 fcmp.caf.s fcc0, f1, f2");
+ "0c100820 fcmp.caf.s fcc0, f1, f2");
COMPARE(fcmp_cond_s(CUN, f5, f6, FCC0),
- "0c1418a0 fcmp.cun.s fcc0, f5, f6");
+ "0c1418a0 fcmp.cun.s fcc0, f5, f6");
COMPARE(fcmp_cond_s(CEQ, f9, f10, FCC0),
- "0c122920 fcmp.ceq.s fcc0, f9, f10");
+ "0c122920 fcmp.ceq.s fcc0, f9, f10");
COMPARE(fcmp_cond_s(CUEQ, f13, f14, FCC0),
- "0c1639a0 fcmp.cueq.s fcc0, f13, f14");
+ "0c1639a0 fcmp.cueq.s fcc0, f13, f14");
COMPARE(fcmp_cond_s(CLT, f1, f2, FCC0),
- "0c110820 fcmp.clt.s fcc0, f1, f2");
+ "0c110820 fcmp.clt.s fcc0, f1, f2");
COMPARE(fcmp_cond_s(CULT, f5, f6, FCC0),
- "0c1518a0 fcmp.cult.s fcc0, f5, f6");
+ "0c1518a0 fcmp.cult.s fcc0, f5, f6");
COMPARE(fcmp_cond_s(CLE, f9, f10, FCC0),
- "0c132920 fcmp.cle.s fcc0, f9, f10");
+ "0c132920 fcmp.cle.s fcc0, f9, f10");
COMPARE(fcmp_cond_s(CULE, f13, f14, FCC0),
- "0c1739a0 fcmp.cule.s fcc0, f13, f14");
+ "0c1739a0 fcmp.cule.s fcc0, f13, f14");
COMPARE(fcmp_cond_s(CNE, f1, f2, FCC0),
- "0c180820 fcmp.cne.s fcc0, f1, f2");
+ "0c180820 fcmp.cne.s fcc0, f1, f2");
COMPARE(fcmp_cond_s(COR, f5, f6, FCC0),
- "0c1a18a0 fcmp.cor.s fcc0, f5, f6");
+ "0c1a18a0 fcmp.cor.s fcc0, f5, f6");
COMPARE(fcmp_cond_s(CUNE, f9, f10, FCC0),
- "0c1c2920 fcmp.cune.s fcc0, f9, f10");
+ "0c1c2920 fcmp.cune.s fcc0, f9, f10");
COMPARE(fcmp_cond_s(SAF, f13, f14, FCC0),
- "0c10b9a0 fcmp.saf.s fcc0, f13, f14");
+ "0c10b9a0 fcmp.saf.s fcc0, f13, f14");
COMPARE(fcmp_cond_s(SUN, f1, f2, FCC0),
- "0c148820 fcmp.sun.s fcc0, f1, f2");
+ "0c148820 fcmp.sun.s fcc0, f1, f2");
COMPARE(fcmp_cond_s(SEQ, f5, f6, FCC0),
- "0c1298a0 fcmp.seq.s fcc0, f5, f6");
+ "0c1298a0 fcmp.seq.s fcc0, f5, f6");
COMPARE(fcmp_cond_s(SUEQ, f9, f10, FCC0),
- "0c16a920 fcmp.sueq.s fcc0, f9, f10");
+ "0c16a920 fcmp.sueq.s fcc0, f9, f10");
// COMPARE(fcmp_cond_s(SLT, f13, f14, FCC0),
- // "0c11b9a0 fcmp.slt.s fcc0, f13, f14");
+ // "0c11b9a0 fcmp.slt.s fcc0, f13, f14");
COMPARE(fcmp_cond_s(SULT, f1, f2, FCC0),
- "0c158820 fcmp.sult.s fcc0, f1, f2");
+ "0c158820 fcmp.sult.s fcc0, f1, f2");
COMPARE(fcmp_cond_s(SLE, f5, f6, FCC0),
- "0c1398a0 fcmp.sle.s fcc0, f5, f6");
+ "0c1398a0 fcmp.sle.s fcc0, f5, f6");
COMPARE(fcmp_cond_s(SULE, f9, f10, FCC0),
- "0c17a920 fcmp.sule.s fcc0, f9, f10");
+ "0c17a920 fcmp.sule.s fcc0, f9, f10");
COMPARE(fcmp_cond_s(SNE, f13, f14, FCC0),
- "0c18b9a0 fcmp.sne.s fcc0, f13, f14");
+ "0c18b9a0 fcmp.sne.s fcc0, f13, f14");
COMPARE(fcmp_cond_s(SOR, f13, f14, FCC0),
- "0c1ab9a0 fcmp.sor.s fcc0, f13, f14");
+ "0c1ab9a0 fcmp.sor.s fcc0, f13, f14");
COMPARE(fcmp_cond_s(SUNE, f1, f2, FCC0),
- "0c1c8820 fcmp.sune.s fcc0, f1, f2");
+ "0c1c8820 fcmp.sune.s fcc0, f1, f2");
COMPARE(fcmp_cond_d(CAF, f1, f2, FCC0),
- "0c200820 fcmp.caf.d fcc0, f1, f2");
+ "0c200820 fcmp.caf.d fcc0, f1, f2");
COMPARE(fcmp_cond_d(CUN, f5, f6, FCC0),
- "0c2418a0 fcmp.cun.d fcc0, f5, f6");
+ "0c2418a0 fcmp.cun.d fcc0, f5, f6");
COMPARE(fcmp_cond_d(CEQ, f9, f10, FCC0),
- "0c222920 fcmp.ceq.d fcc0, f9, f10");
+ "0c222920 fcmp.ceq.d fcc0, f9, f10");
COMPARE(fcmp_cond_d(CUEQ, f13, f14, FCC0),
- "0c2639a0 fcmp.cueq.d fcc0, f13, f14");
+ "0c2639a0 fcmp.cueq.d fcc0, f13, f14");
COMPARE(fcmp_cond_d(CLT, f1, f2, FCC0),
- "0c210820 fcmp.clt.d fcc0, f1, f2");
+ "0c210820 fcmp.clt.d fcc0, f1, f2");
COMPARE(fcmp_cond_d(CULT, f5, f6, FCC0),
- "0c2518a0 fcmp.cult.d fcc0, f5, f6");
+ "0c2518a0 fcmp.cult.d fcc0, f5, f6");
COMPARE(fcmp_cond_d(CLE, f9, f10, FCC0),
- "0c232920 fcmp.cle.d fcc0, f9, f10");
+ "0c232920 fcmp.cle.d fcc0, f9, f10");
COMPARE(fcmp_cond_d(CULE, f13, f14, FCC0),
- "0c2739a0 fcmp.cule.d fcc0, f13, f14");
+ "0c2739a0 fcmp.cule.d fcc0, f13, f14");
COMPARE(fcmp_cond_d(CNE, f1, f2, FCC0),
- "0c280820 fcmp.cne.d fcc0, f1, f2");
+ "0c280820 fcmp.cne.d fcc0, f1, f2");
COMPARE(fcmp_cond_d(COR, f5, f6, FCC0),
- "0c2a18a0 fcmp.cor.d fcc0, f5, f6");
+ "0c2a18a0 fcmp.cor.d fcc0, f5, f6");
COMPARE(fcmp_cond_d(CUNE, f9, f10, FCC0),
- "0c2c2920 fcmp.cune.d fcc0, f9, f10");
+ "0c2c2920 fcmp.cune.d fcc0, f9, f10");
COMPARE(fcmp_cond_d(SAF, f13, f14, FCC0),
- "0c20b9a0 fcmp.saf.d fcc0, f13, f14");
+ "0c20b9a0 fcmp.saf.d fcc0, f13, f14");
COMPARE(fcmp_cond_d(SUN, f1, f2, FCC0),
- "0c248820 fcmp.sun.d fcc0, f1, f2");
+ "0c248820 fcmp.sun.d fcc0, f1, f2");
COMPARE(fcmp_cond_d(SEQ, f5, f6, FCC0),
- "0c2298a0 fcmp.seq.d fcc0, f5, f6");
+ "0c2298a0 fcmp.seq.d fcc0, f5, f6");
COMPARE(fcmp_cond_d(SUEQ, f9, f10, FCC0),
- "0c26a920 fcmp.sueq.d fcc0, f9, f10");
+ "0c26a920 fcmp.sueq.d fcc0, f9, f10");
// COMPARE(fcmp_cond_d(SLT, f13, f14, FCC0),
- // "0c21b9a0 fcmp.slt.d fcc0, f13, f14");
+ // "0c21b9a0 fcmp.slt.d fcc0, f13, f14");
COMPARE(fcmp_cond_d(SULT, f1, f2, FCC0),
- "0c258820 fcmp.sult.d fcc0, f1, f2");
+ "0c258820 fcmp.sult.d fcc0, f1, f2");
COMPARE(fcmp_cond_d(SLE, f5, f6, FCC0),
- "0c2398a0 fcmp.sle.d fcc0, f5, f6");
+ "0c2398a0 fcmp.sle.d fcc0, f5, f6");
COMPARE(fcmp_cond_d(SULE, f9, f10, FCC0),
- "0c27a920 fcmp.sule.d fcc0, f9, f10");
+ "0c27a920 fcmp.sule.d fcc0, f9, f10");
COMPARE(fcmp_cond_d(SNE, f13, f14, FCC0),
- "0c28b9a0 fcmp.sne.d fcc0, f13, f14");
+ "0c28b9a0 fcmp.sne.d fcc0, f13, f14");
COMPARE(fcmp_cond_d(SOR, f13, f14, FCC0),
- "0c2ab9a0 fcmp.sor.d fcc0, f13, f14");
+ "0c2ab9a0 fcmp.sor.d fcc0, f13, f14");
COMPARE(fcmp_cond_d(SUNE, f1, f2, FCC0),
- "0c2c8820 fcmp.sune.d fcc0, f1, f2");
+ "0c2c8820 fcmp.sune.d fcc0, f1, f2");
VERIFY_RUN();
}
@@ -495,47 +554,47 @@ TEST(TypeOp12) {
TEST(TypeOp14) {
SET_UP();
- COMPARE(alsl_w(a0, a1, a2, 1), "000418a4 alsl.w a0, a1, a2, 1");
- COMPARE(alsl_w(a3, a4, a5, 3), "00052507 alsl.w a3, a4, a5, 3");
- COMPARE(alsl_w(a6, a7, t0, 4), "0005b16a alsl.w a6, a7, t0, 4");
+ COMPARE(alsl_w(a0, a1, a2, 1), "000418a4 alsl.w a0, a1, a2, 1");
+ COMPARE(alsl_w(a3, a4, a5, 3), "00052507 alsl.w a3, a4, a5, 3");
+ COMPARE(alsl_w(a6, a7, t0, 4), "0005b16a alsl.w a6, a7, t0, 4");
- COMPARE(alsl_wu(t1, t2, t3, 1), "00063dcd alsl.wu t1, t2, t3, 1");
- COMPARE(alsl_wu(t4, t5, t6, 3), "00074a30 alsl.wu t4, t5, t6, 3");
- COMPARE(alsl_wu(a0, a1, a2, 4), "000798a4 alsl.wu a0, a1, a2, 4");
+ COMPARE(alsl_wu(t1, t2, t3, 1), "00063dcd alsl.wu t1, t2, t3, 1");
+ COMPARE(alsl_wu(t4, t5, t6, 3), "00074a30 alsl.wu t4, t5, t6, 3");
+ COMPARE(alsl_wu(a0, a1, a2, 4), "000798a4 alsl.wu a0, a1, a2, 4");
- COMPARE(alsl_d(a3, a4, a5, 1), "002c2507 alsl.d a3, a4, a5, 1");
- COMPARE(alsl_d(a6, a7, t0, 3), "002d316a alsl.d a6, a7, t0, 3");
- COMPARE(alsl_d(t1, t2, t3, 4), "002dbdcd alsl.d t1, t2, t3, 4");
+ COMPARE(alsl_d(a3, a4, a5, 1), "002c2507 alsl.d a3, a4, a5, 1");
+ COMPARE(alsl_d(a6, a7, t0, 3), "002d316a alsl.d a6, a7, t0, 3");
+ COMPARE(alsl_d(t1, t2, t3, 4), "002dbdcd alsl.d t1, t2, t3, 4");
COMPARE(bytepick_w(t4, t5, t6, 0),
- "00084a30 bytepick.w t4, t5, t6, 0");
+ "00084a30 bytepick.w t4, t5, t6, 0");
COMPARE(bytepick_w(a0, a1, a2, 3),
- "000998a4 bytepick.w a0, a1, a2, 3");
+ "000998a4 bytepick.w a0, a1, a2, 3");
COMPARE(bytepick_d(a6, a7, t0, 0),
- "000c316a bytepick.d a6, a7, t0, 0");
+ "000c316a bytepick.d a6, a7, t0, 0");
COMPARE(bytepick_d(t4, t5, t6, 7),
- "000fca30 bytepick.d t4, t5, t6, 7");
+ "000fca30 bytepick.d t4, t5, t6, 7");
- COMPARE(slli_w(a3, a3, 31), "0040fce7 slli.w a3, a3, 31");
- COMPARE(slli_w(a6, a6, 1), "0040854a slli.w a6, a6, 1");
+ COMPARE(slli_w(a3, a3, 31), "0040fce7 slli.w a3, a3, 31");
+ COMPARE(slli_w(a6, a6, 1), "0040854a slli.w a6, a6, 1");
- COMPARE(slli_d(t3, t2, 63), "0041fdcf slli.d t3, t2, 63");
- COMPARE(slli_d(t4, a6, 1), "00410550 slli.d t4, a6, 1");
+ COMPARE(slli_d(t3, t2, 63), "0041fdcf slli.d t3, t2, 63");
+ COMPARE(slli_d(t4, a6, 1), "00410550 slli.d t4, a6, 1");
- COMPARE(srli_w(a7, a7, 31), "0044fd6b srli.w a7, a7, 31");
- COMPARE(srli_w(a4, a4, 1), "00448508 srli.w a4, a4, 1");
+ COMPARE(srli_w(a7, a7, 31), "0044fd6b srli.w a7, a7, 31");
+ COMPARE(srli_w(a4, a4, 1), "00448508 srli.w a4, a4, 1");
- COMPARE(srli_d(a4, a3, 63), "0045fce8 srli.d a4, a3, 63");
- COMPARE(srli_d(a4, a4, 1), "00450508 srli.d a4, a4, 1");
+ COMPARE(srli_d(a4, a3, 63), "0045fce8 srli.d a4, a3, 63");
+ COMPARE(srli_d(a4, a4, 1), "00450508 srli.d a4, a4, 1");
- COMPARE(srai_d(a0, a0, 63), "0049fc84 srai.d a0, a0, 63");
- COMPARE(srai_d(a4, a1, 1), "004904a8 srai.d a4, a1, 1");
+ COMPARE(srai_d(a0, a0, 63), "0049fc84 srai.d a0, a0, 63");
+ COMPARE(srai_d(a4, a1, 1), "004904a8 srai.d a4, a1, 1");
- COMPARE(srai_w(s4, a3, 31), "0048fcfb srai.w s4, a3, 31");
- COMPARE(srai_w(s4, a5, 1), "0048853b srai.w s4, a5, 1");
+ COMPARE(srai_w(s4, a3, 31), "0048fcfb srai.w s4, a3, 31");
+ COMPARE(srai_w(s4, a5, 1), "0048853b srai.w s4, a5, 1");
- COMPARE(rotri_d(t7, t6, 1), "004d0653 rotri.d t7, t6, 1");
+ COMPARE(rotri_d(t7, t6, 1), "004d0653 rotri.d t7, t6, 1");
VERIFY_RUN();
}
@@ -543,206 +602,209 @@ TEST(TypeOp14) {
TEST(TypeOp17) {
SET_UP();
- COMPARE(sltu(t5, t4, a4), "0012a211 sltu t5, t4, a4");
- COMPARE(sltu(t4, zero_reg, t4), "0012c010 sltu t4, zero_reg, t4");
+ COMPARE(sltu(t5, t4, a4), "0012a211 sltu t5, t4, a4");
+ COMPARE(sltu(t4, zero_reg, t4),
+ "0012c010 sltu t4, zero_reg, t4");
- COMPARE(add_w(a4, a4, a6), "00102908 add.w a4, a4, a6");
- COMPARE(add_w(a5, a6, t3), "00103d49 add.w a5, a6, t3");
+ COMPARE(add_w(a4, a4, a6), "00102908 add.w a4, a4, a6");
+ COMPARE(add_w(a5, a6, t3), "00103d49 add.w a5, a6, t3");
- COMPARE(add_d(a4, t0, t1), "0010b588 add.d a4, t0, t1");
- COMPARE(add_d(a6, a3, t1), "0010b4ea add.d a6, a3, t1");
+ COMPARE(add_d(a4, t0, t1), "0010b588 add.d a4, t0, t1");
+ COMPARE(add_d(a6, a3, t1), "0010b4ea add.d a6, a3, t1");
- COMPARE(sub_w(a7, a7, a2), "0011196b sub.w a7, a7, a2");
- COMPARE(sub_w(a2, a2, s3), "001168c6 sub.w a2, a2, s3");
+ COMPARE(sub_w(a7, a7, a2), "0011196b sub.w a7, a7, a2");
+ COMPARE(sub_w(a2, a2, s3), "001168c6 sub.w a2, a2, s3");
- COMPARE(sub_d(s3, ra, s3), "0011e83a sub.d s3, ra, s3");
- COMPARE(sub_d(a0, a1, a2), "001198a4 sub.d a0, a1, a2");
+ COMPARE(sub_d(s3, ra, s3), "0011e83a sub.d s3, ra, s3");
+ COMPARE(sub_d(a0, a1, a2), "001198a4 sub.d a0, a1, a2");
- COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6");
- COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4");
+ COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6");
+ COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4");
- COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3");
- COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5");
+ COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3");
+ COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5");
- COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0");
- COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3");
+ COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0");
+ COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3");
- COMPARE(or_(s3, sp, zero_reg), "0015007a or s3, sp, zero_reg");
- COMPARE(or_(a4, a0, zero_reg), "00150088 or a4, a0, zero_reg");
+ COMPARE(or_(s3, sp, zero_reg),
+ "0015007a or s3, sp, zero_reg");
+ COMPARE(or_(a4, a0, zero_reg),
+ "00150088 or a4, a0, zero_reg");
- COMPARE(and_(sp, sp, t6), "0014c863 and sp, sp, t6");
- COMPARE(and_(a3, a3, a7), "0014ace7 and a3, a3, a7");
+ COMPARE(and_(sp, sp, t6), "0014c863 and sp, sp, t6");
+ COMPARE(and_(a3, a3, a7), "0014ace7 and a3, a3, a7");
- COMPARE(nor(a7, a7, a7), "00142d6b nor a7, a7, a7");
- COMPARE(nor(t4, t5, t6), "00144a30 nor t4, t5, t6");
+ COMPARE(nor(a7, a7, a7), "00142d6b nor a7, a7, a7");
+ COMPARE(nor(t4, t5, t6), "00144a30 nor t4, t5, t6");
- COMPARE(xor_(a0, a1, a2), "001598a4 xor a0, a1, a2");
- COMPARE(xor_(a3, a4, a5), "0015a507 xor a3, a4, a5");
+ COMPARE(xor_(a0, a1, a2), "001598a4 xor a0, a1, a2");
+ COMPARE(xor_(a3, a4, a5), "0015a507 xor a3, a4, a5");
- COMPARE(orn(a6, a7, t0), "0016316a orn a6, a7, t0");
- COMPARE(orn(t1, t2, t3), "00163dcd orn t1, t2, t3");
+ COMPARE(orn(a6, a7, t0), "0016316a orn a6, a7, t0");
+ COMPARE(orn(t1, t2, t3), "00163dcd orn t1, t2, t3");
- COMPARE(andn(t4, t5, t6), "0016ca30 andn t4, t5, t6");
- COMPARE(andn(a0, a1, a2), "001698a4 andn a0, a1, a2");
+ COMPARE(andn(t4, t5, t6), "0016ca30 andn t4, t5, t6");
+ COMPARE(andn(a0, a1, a2), "001698a4 andn a0, a1, a2");
- COMPARE(sll_w(a3, t0, a7), "00172d87 sll.w a3, t0, a7");
- COMPARE(sll_w(a3, a4, a3), "00171d07 sll.w a3, a4, a3");
+ COMPARE(sll_w(a3, t0, a7), "00172d87 sll.w a3, t0, a7");
+ COMPARE(sll_w(a3, a4, a3), "00171d07 sll.w a3, a4, a3");
- COMPARE(srl_w(a3, a4, a3), "00179d07 srl.w a3, a4, a3");
- COMPARE(srl_w(a3, t1, t4), "0017c1a7 srl.w a3, t1, t4");
+ COMPARE(srl_w(a3, a4, a3), "00179d07 srl.w a3, a4, a3");
+ COMPARE(srl_w(a3, t1, t4), "0017c1a7 srl.w a3, t1, t4");
- COMPARE(sra_w(a4, t4, a4), "00182208 sra.w a4, t4, a4");
- COMPARE(sra_w(a3, t1, a6), "001829a7 sra.w a3, t1, a6");
+ COMPARE(sra_w(a4, t4, a4), "00182208 sra.w a4, t4, a4");
+ COMPARE(sra_w(a3, t1, a6), "001829a7 sra.w a3, t1, a6");
- COMPARE(sll_d(a3, a1, a3), "00189ca7 sll.d a3, a1, a3");
- COMPARE(sll_d(a7, a4, t0), "0018b10b sll.d a7, a4, t0");
+ COMPARE(sll_d(a3, a1, a3), "00189ca7 sll.d a3, a1, a3");
+ COMPARE(sll_d(a7, a4, t0), "0018b10b sll.d a7, a4, t0");
- COMPARE(srl_d(a7, a7, t0), "0019316b srl.d a7, a7, t0");
- COMPARE(srl_d(t0, a6, t0), "0019314c srl.d t0, a6, t0");
+ COMPARE(srl_d(a7, a7, t0), "0019316b srl.d a7, a7, t0");
+ COMPARE(srl_d(t0, a6, t0), "0019314c srl.d t0, a6, t0");
- COMPARE(sra_d(a3, a4, a5), "0019a507 sra.d a3, a4, a5");
- COMPARE(sra_d(a6, a7, t0), "0019b16a sra.d a6, a7, t0");
+ COMPARE(sra_d(a3, a4, a5), "0019a507 sra.d a3, a4, a5");
+ COMPARE(sra_d(a6, a7, t0), "0019b16a sra.d a6, a7, t0");
- COMPARE(rotr_d(t1, t2, t3), "001bbdcd rotr.d t1, t2, t3");
- COMPARE(rotr_d(t4, t5, t6), "001bca30 rotr.d t4, t5, t6");
+ COMPARE(rotr_d(t1, t2, t3), "001bbdcd rotr.d t1, t2, t3");
+ COMPARE(rotr_d(t4, t5, t6), "001bca30 rotr.d t4, t5, t6");
- COMPARE(rotr_w(a0, a1, a2), "001b18a4 rotr.w a0, a1, a2");
- COMPARE(rotr_w(a3, a4, a5), "001b2507 rotr.w a3, a4, a5");
+ COMPARE(rotr_w(a0, a1, a2), "001b18a4 rotr.w a0, a1, a2");
+ COMPARE(rotr_w(a3, a4, a5), "001b2507 rotr.w a3, a4, a5");
- COMPARE(mul_w(t8, a5, t7), "001c4d34 mul.w t8, a5, t7");
- COMPARE(mul_w(t4, t5, t6), "001c4a30 mul.w t4, t5, t6");
+ COMPARE(mul_w(t8, a5, t7), "001c4d34 mul.w t8, a5, t7");
+ COMPARE(mul_w(t4, t5, t6), "001c4a30 mul.w t4, t5, t6");
- COMPARE(mulh_w(s3, a3, t7), "001cccfa mulh.w s3, a3, t7");
- COMPARE(mulh_w(a0, a1, a2), "001c98a4 mulh.w a0, a1, a2");
+ COMPARE(mulh_w(s3, a3, t7), "001cccfa mulh.w s3, a3, t7");
+ COMPARE(mulh_w(a0, a1, a2), "001c98a4 mulh.w a0, a1, a2");
- COMPARE(mulh_wu(a6, a7, t0), "001d316a mulh.wu a6, a7, t0");
- COMPARE(mulh_wu(t1, t2, t3), "001d3dcd mulh.wu t1, t2, t3");
+ COMPARE(mulh_wu(a6, a7, t0), "001d316a mulh.wu a6, a7, t0");
+ COMPARE(mulh_wu(t1, t2, t3), "001d3dcd mulh.wu t1, t2, t3");
- COMPARE(mul_d(t2, a5, t1), "001db52e mul.d t2, a5, t1");
- COMPARE(mul_d(a4, a4, a5), "001da508 mul.d a4, a4, a5");
+ COMPARE(mul_d(t2, a5, t1), "001db52e mul.d t2, a5, t1");
+ COMPARE(mul_d(a4, a4, a5), "001da508 mul.d a4, a4, a5");
- COMPARE(mulh_d(a3, a4, a5), "001e2507 mulh.d a3, a4, a5");
- COMPARE(mulh_d(a6, a7, t0), "001e316a mulh.d a6, a7, t0");
+ COMPARE(mulh_d(a3, a4, a5), "001e2507 mulh.d a3, a4, a5");
+ COMPARE(mulh_d(a6, a7, t0), "001e316a mulh.d a6, a7, t0");
- COMPARE(mulh_du(t1, t2, t3), "001ebdcd mulh.du t1, t2, t3");
- COMPARE(mulh_du(t4, t5, t6), "001eca30 mulh.du t4, t5, t6");
+ COMPARE(mulh_du(t1, t2, t3), "001ebdcd mulh.du t1, t2, t3");
+ COMPARE(mulh_du(t4, t5, t6), "001eca30 mulh.du t4, t5, t6");
COMPARE(mulw_d_w(a0, a1, a2), "001f18a4 mulw.d.w a0, a1, a2");
COMPARE(mulw_d_w(a3, a4, a5), "001f2507 mulw.d.w a3, a4, a5");
- COMPARE(mulw_d_wu(a6, a7, t0), "001fb16a mulw.d.wu a6, a7, t0");
- COMPARE(mulw_d_wu(t1, t2, t3), "001fbdcd mulw.d.wu t1, t2, t3");
+ COMPARE(mulw_d_wu(a6, a7, t0), "001fb16a mulw.d.wu a6, a7, t0");
+ COMPARE(mulw_d_wu(t1, t2, t3), "001fbdcd mulw.d.wu t1, t2, t3");
- COMPARE(div_w(a5, a5, a3), "00201d29 div.w a5, a5, a3");
- COMPARE(div_w(t4, t5, t6), "00204a30 div.w t4, t5, t6");
+ COMPARE(div_w(a5, a5, a3), "00201d29 div.w a5, a5, a3");
+ COMPARE(div_w(t4, t5, t6), "00204a30 div.w t4, t5, t6");
- COMPARE(mod_w(a6, t3, a6), "0020a9ea mod.w a6, t3, a6");
- COMPARE(mod_w(a3, a4, a3), "00209d07 mod.w a3, a4, a3");
+ COMPARE(mod_w(a6, t3, a6), "0020a9ea mod.w a6, t3, a6");
+ COMPARE(mod_w(a3, a4, a3), "00209d07 mod.w a3, a4, a3");
- COMPARE(div_wu(t1, t2, t3), "00213dcd div.wu t1, t2, t3");
- COMPARE(div_wu(t4, t5, t6), "00214a30 div.wu t4, t5, t6");
+ COMPARE(div_wu(t1, t2, t3), "00213dcd div.wu t1, t2, t3");
+ COMPARE(div_wu(t4, t5, t6), "00214a30 div.wu t4, t5, t6");
- COMPARE(mod_wu(a0, a1, a2), "002198a4 mod.wu a0, a1, a2");
- COMPARE(mod_wu(a3, a4, a5), "0021a507 mod.wu a3, a4, a5");
+ COMPARE(mod_wu(a0, a1, a2), "002198a4 mod.wu a0, a1, a2");
+ COMPARE(mod_wu(a3, a4, a5), "0021a507 mod.wu a3, a4, a5");
- COMPARE(div_d(t0, t0, a6), "0022298c div.d t0, t0, a6");
- COMPARE(div_d(a7, a7, a5), "0022256b div.d a7, a7, a5");
+ COMPARE(div_d(t0, t0, a6), "0022298c div.d t0, t0, a6");
+ COMPARE(div_d(a7, a7, a5), "0022256b div.d a7, a7, a5");
- COMPARE(mod_d(a6, a7, t0), "0022b16a mod.d a6, a7, t0");
- COMPARE(mod_d(t1, t2, t3), "0022bdcd mod.d t1, t2, t3");
+ COMPARE(mod_d(a6, a7, t0), "0022b16a mod.d a6, a7, t0");
+ COMPARE(mod_d(t1, t2, t3), "0022bdcd mod.d t1, t2, t3");
- COMPARE(div_du(t4, t5, t6), "00234a30 div.du t4, t5, t6");
- COMPARE(div_du(a0, a1, a2), "002318a4 div.du a0, a1, a2");
+ COMPARE(div_du(t4, t5, t6), "00234a30 div.du t4, t5, t6");
+ COMPARE(div_du(a0, a1, a2), "002318a4 div.du a0, a1, a2");
- COMPARE(mod_du(a3, a4, a5), "0023a507 mod.du a3, a4, a5");
- COMPARE(mod_du(a6, a7, t0), "0023b16a mod.du a6, a7, t0");
+ COMPARE(mod_du(a3, a4, a5), "0023a507 mod.du a3, a4, a5");
+ COMPARE(mod_du(a6, a7, t0), "0023b16a mod.du a6, a7, t0");
- COMPARE(fadd_s(f3, f4, f5), "01009483 fadd.s f3, f4, f5");
- COMPARE(fadd_s(f6, f7, f8), "0100a0e6 fadd.s f6, f7, f8");
+ COMPARE(fadd_s(f3, f4, f5), "01009483 fadd.s f3, f4, f5");
+ COMPARE(fadd_s(f6, f7, f8), "0100a0e6 fadd.s f6, f7, f8");
- COMPARE(fadd_d(f0, f1, f0), "01010020 fadd.d f0, f1, f0");
- COMPARE(fadd_d(f0, f1, f2), "01010820 fadd.d f0, f1, f2");
+ COMPARE(fadd_d(f0, f1, f0), "01010020 fadd.d f0, f1, f0");
+ COMPARE(fadd_d(f0, f1, f2), "01010820 fadd.d f0, f1, f2");
- COMPARE(fsub_s(f9, f10, f11), "0102ad49 fsub.s f9, f10, f11");
- COMPARE(fsub_s(f12, f13, f14), "0102b9ac fsub.s f12, f13, f14");
+ COMPARE(fsub_s(f9, f10, f11), "0102ad49 fsub.s f9, f10, f11");
+ COMPARE(fsub_s(f12, f13, f14), "0102b9ac fsub.s f12, f13, f14");
- COMPARE(fsub_d(f30, f0, f30), "0103781e fsub.d f30, f0, f30");
- COMPARE(fsub_d(f0, f0, f1), "01030400 fsub.d f0, f0, f1");
+ COMPARE(fsub_d(f30, f0, f30), "0103781e fsub.d f30, f0, f30");
+ COMPARE(fsub_d(f0, f0, f1), "01030400 fsub.d f0, f0, f1");
- COMPARE(fmul_s(f15, f16, f17), "0104c60f fmul.s f15, f16, f17");
- COMPARE(fmul_s(f18, f19, f20), "0104d272 fmul.s f18, f19, f20");
+ COMPARE(fmul_s(f15, f16, f17), "0104c60f fmul.s f15, f16, f17");
+ COMPARE(fmul_s(f18, f19, f20), "0104d272 fmul.s f18, f19, f20");
- COMPARE(fmul_d(f0, f0, f1), "01050400 fmul.d f0, f0, f1");
- COMPARE(fmul_d(f0, f0, f0), "01050000 fmul.d f0, f0, f0");
+ COMPARE(fmul_d(f0, f0, f1), "01050400 fmul.d f0, f0, f1");
+ COMPARE(fmul_d(f0, f0, f0), "01050000 fmul.d f0, f0, f0");
- COMPARE(fdiv_s(f0, f1, f2), "01068820 fdiv.s f0, f1, f2");
- COMPARE(fdiv_s(f3, f4, f5), "01069483 fdiv.s f3, f4, f5");
+ COMPARE(fdiv_s(f0, f1, f2), "01068820 fdiv.s f0, f1, f2");
+ COMPARE(fdiv_s(f3, f4, f5), "01069483 fdiv.s f3, f4, f5");
- COMPARE(fdiv_d(f0, f0, f1), "01070400 fdiv.d f0, f0, f1");
- COMPARE(fdiv_d(f0, f1, f0), "01070020 fdiv.d f0, f1, f0");
+ COMPARE(fdiv_d(f0, f0, f1), "01070400 fdiv.d f0, f0, f1");
+ COMPARE(fdiv_d(f0, f1, f0), "01070020 fdiv.d f0, f1, f0");
- COMPARE(fmax_s(f9, f10, f11), "0108ad49 fmax.s f9, f10, f11");
- COMPARE(fmin_s(f6, f7, f8), "010aa0e6 fmin.s f6, f7, f8");
+ COMPARE(fmax_s(f9, f10, f11), "0108ad49 fmax.s f9, f10, f11");
+ COMPARE(fmin_s(f6, f7, f8), "010aa0e6 fmin.s f6, f7, f8");
- COMPARE(fmax_d(f0, f1, f0), "01090020 fmax.d f0, f1, f0");
- COMPARE(fmin_d(f0, f1, f0), "010b0020 fmin.d f0, f1, f0");
+ COMPARE(fmax_d(f0, f1, f0), "01090020 fmax.d f0, f1, f0");
+ COMPARE(fmin_d(f0, f1, f0), "010b0020 fmin.d f0, f1, f0");
- COMPARE(fmaxa_s(f12, f13, f14), "010cb9ac fmaxa.s f12, f13, f14");
- COMPARE(fmina_s(f15, f16, f17), "010ec60f fmina.s f15, f16, f17");
+ COMPARE(fmaxa_s(f12, f13, f14), "010cb9ac fmaxa.s f12, f13, f14");
+ COMPARE(fmina_s(f15, f16, f17), "010ec60f fmina.s f15, f16, f17");
- COMPARE(fmaxa_d(f18, f19, f20), "010d5272 fmaxa.d f18, f19, f20");
- COMPARE(fmina_d(f0, f1, f2), "010f0820 fmina.d f0, f1, f2");
+ COMPARE(fmaxa_d(f18, f19, f20), "010d5272 fmaxa.d f18, f19, f20");
+ COMPARE(fmina_d(f0, f1, f2), "010f0820 fmina.d f0, f1, f2");
- COMPARE(ldx_b(a0, a1, a2), "380018a4 ldx.b a0, a1, a2");
- COMPARE(ldx_h(a3, a4, a5), "38042507 ldx.h a3, a4, a5");
- COMPARE(ldx_w(a6, a7, t0), "3808316a ldx.w a6, a7, t0");
+ COMPARE(ldx_b(a0, a1, a2), "380018a4 ldx.b a0, a1, a2");
+ COMPARE(ldx_h(a3, a4, a5), "38042507 ldx.h a3, a4, a5");
+ COMPARE(ldx_w(a6, a7, t0), "3808316a ldx.w a6, a7, t0");
- COMPARE(stx_b(t1, t2, t3), "38103dcd stx.b t1, t2, t3");
- COMPARE(stx_h(t4, t5, t6), "38144a30 stx.h t4, t5, t6");
- COMPARE(stx_w(a0, a1, a2), "381818a4 stx.w a0, a1, a2");
+ COMPARE(stx_b(t1, t2, t3), "38103dcd stx.b t1, t2, t3");
+ COMPARE(stx_h(t4, t5, t6), "38144a30 stx.h t4, t5, t6");
+ COMPARE(stx_w(a0, a1, a2), "381818a4 stx.w a0, a1, a2");
- COMPARE(ldx_bu(a3, a4, a5), "38202507 ldx.bu a3, a4, a5");
- COMPARE(ldx_hu(a6, a7, t0), "3824316a ldx.hu a6, a7, t0");
- COMPARE(ldx_wu(t1, t2, t3), "38283dcd ldx.wu t1, t2, t3");
+ COMPARE(ldx_bu(a3, a4, a5), "38202507 ldx.bu a3, a4, a5");
+ COMPARE(ldx_hu(a6, a7, t0), "3824316a ldx.hu a6, a7, t0");
+ COMPARE(ldx_wu(t1, t2, t3), "38283dcd ldx.wu t1, t2, t3");
- COMPARE(ldx_d(a2, s6, t6), "380c4ba6 ldx.d a2, s6, t6");
- COMPARE(ldx_d(t7, s6, t6), "380c4bb3 ldx.d t7, s6, t6");
+ COMPARE(ldx_d(a2, s6, t6), "380c4ba6 ldx.d a2, s6, t6");
+ COMPARE(ldx_d(t7, s6, t6), "380c4bb3 ldx.d t7, s6, t6");
- COMPARE(stx_d(a4, a3, t6), "381c48e8 stx.d a4, a3, t6");
- COMPARE(stx_d(a0, a3, t6), "381c48e4 stx.d a0, a3, t6");
+ COMPARE(stx_d(a4, a3, t6), "381c48e8 stx.d a4, a3, t6");
+ COMPARE(stx_d(a0, a3, t6), "381c48e4 stx.d a0, a3, t6");
- COMPARE(dbar(0), "38720000 dbar 0x0(0)");
- COMPARE(ibar(5555), "387295b3 ibar 0x15b3(5555)");
+ COMPARE(dbar(0), "38720000 dbar 0x0(0)");
+ COMPARE(ibar(5555), "387295b3 ibar 0x15b3(5555)");
- COMPARE(break_(0), "002a0000 break code: 0x0(0)");
- COMPARE(break_(0x3fc0), "002a3fc0 break code: 0x3fc0(16320)");
+ COMPARE(break_(0), "002a0000 break code: 0x0(0)");
+ COMPARE(break_(0x3fc0), "002a3fc0 break code: 0x3fc0(16320)");
- COMPARE(fldx_s(f3, a4, a5), "38302503 fldx.s f3, a4, a5");
- COMPARE(fldx_d(f6, a7, t0), "38343166 fldx.d f6, a7, t0");
+ COMPARE(fldx_s(f3, a4, a5), "38302503 fldx.s f3, a4, a5");
+ COMPARE(fldx_d(f6, a7, t0), "38343166 fldx.d f6, a7, t0");
- COMPARE(fstx_s(f1, t2, t3), "38383dc1 fstx.s f1, t2, t3");
- COMPARE(fstx_d(f4, t5, t6), "383c4a24 fstx.d f4, t5, t6");
+ COMPARE(fstx_s(f1, t2, t3), "38383dc1 fstx.s f1, t2, t3");
+ COMPARE(fstx_d(f4, t5, t6), "383c4a24 fstx.d f4, t5, t6");
COMPARE(amswap_w(a4, a5, a6), "38602548 amswap.w a4, a5, a6");
COMPARE(amswap_d(a7, t0, t1), "3860b1ab amswap.d a7, t0, t1");
- COMPARE(amadd_w(t2, t3, t4), "38613e0e amadd.w t2, t3, t4");
- COMPARE(amadd_d(t5, t6, a0), "3861c891 amadd.d t5, t6, a0");
+ COMPARE(amadd_w(t2, t3, t4), "38613e0e amadd.w t2, t3, t4");
+ COMPARE(amadd_d(t5, t6, a0), "3861c891 amadd.d t5, t6, a0");
- COMPARE(amand_w(a1, a2, a3), "386218e5 amand.w a1, a2, a3");
- COMPARE(amand_d(a4, a5, a6), "3862a548 amand.d a4, a5, a6");
+ COMPARE(amand_w(a1, a2, a3), "386218e5 amand.w a1, a2, a3");
+ COMPARE(amand_d(a4, a5, a6), "3862a548 amand.d a4, a5, a6");
- COMPARE(amor_w(a7, t0, t1), "386331ab amor.w a7, t0, t1");
- COMPARE(amor_d(t2, t3, t4), "3863be0e amor.d t2, t3, t4");
+ COMPARE(amor_w(a7, t0, t1), "386331ab amor.w a7, t0, t1");
+ COMPARE(amor_d(t2, t3, t4), "3863be0e amor.d t2, t3, t4");
- COMPARE(amxor_w(t5, t6, a0), "38644891 amxor.w t5, t6, a0");
- COMPARE(amxor_d(a1, a2, a3), "386498e5 amxor.d a1, a2, a3");
+ COMPARE(amxor_w(t5, t6, a0), "38644891 amxor.w t5, t6, a0");
+ COMPARE(amxor_d(a1, a2, a3), "386498e5 amxor.d a1, a2, a3");
- COMPARE(ammax_w(a4, a5, a6), "38652548 ammax.w a4, a5, a6");
- COMPARE(ammax_d(a7, t0, t1), "3865b1ab ammax.d a7, t0, t1");
+ COMPARE(ammax_w(a4, a5, a6), "38652548 ammax.w a4, a5, a6");
+ COMPARE(ammax_d(a7, t0, t1), "3865b1ab ammax.d a7, t0, t1");
- COMPARE(ammin_w(t2, t3, t4), "38663e0e ammin.w t2, t3, t4");
- COMPARE(ammin_d(t5, t6, a0), "3866c891 ammin.d t5, t6, a0");
+ COMPARE(ammin_w(t2, t3, t4), "38663e0e ammin.w t2, t3, t4");
+ COMPARE(ammin_d(t5, t6, a0), "3866c891 ammin.d t5, t6, a0");
COMPARE(ammax_wu(a1, a2, a3), "386718e5 ammax.wu a1, a2, a3");
COMPARE(ammax_du(a4, a5, a6), "3867a548 ammax.du a4, a5, a6");
@@ -750,24 +812,24 @@ TEST(TypeOp17) {
COMPARE(ammin_wu(a7, t0, t1), "386831ab ammin.wu a7, t0, t1");
COMPARE(ammin_du(t2, t3, t4), "3868be0e ammin.du t2, t3, t4");
- COMPARE(ammax_db_d(a0, a1, a2), "386e94c4 ammax_db.d a0, a1, a2");
- COMPARE(ammax_db_du(a3, a4, a5), "3870a127 ammax_db.du a3, a4, a5");
+ COMPARE(ammax_db_d(a0, a1, a2), "386e94c4 ammax_db.d a0, a1, a2");
+ COMPARE(ammax_db_du(a3, a4, a5), "3870a127 ammax_db.du a3, a4, a5");
- COMPARE(ammax_db_w(a6, a7, t0), "386e2d8a ammax_db.w a6, a7, t0");
- COMPARE(ammax_db_wu(t1, t2, t3), "387039ed ammax_db.wu t1, t2, t3");
+ COMPARE(ammax_db_w(a6, a7, t0), "386e2d8a ammax_db.w a6, a7, t0");
+ COMPARE(ammax_db_wu(t1, t2, t3), "387039ed ammax_db.wu t1, t2, t3");
- COMPARE(ammin_db_d(t4, t5, t6), "386fc650 ammin_db.d t4, t5, t6");
- COMPARE(ammin_db_du(a0, a1, a2), "387194c4 ammin_db.du a0, a1, a2");
+ COMPARE(ammin_db_d(t4, t5, t6), "386fc650 ammin_db.d t4, t5, t6");
+ COMPARE(ammin_db_du(a0, a1, a2), "387194c4 ammin_db.du a0, a1, a2");
- COMPARE(ammin_db_wu(a3, a4, a5), "38712127 ammin_db.wu a3, a4, a5");
- COMPARE(ammin_db_w(a6, a7, t0), "386f2d8a ammin_db.w a6, a7, t0");
+ COMPARE(ammin_db_wu(a3, a4, a5), "38712127 ammin_db.wu a3, a4, a5");
+ COMPARE(ammin_db_w(a6, a7, t0), "386f2d8a ammin_db.w a6, a7, t0");
- COMPARE(fscaleb_s(f0, f1, f2), "01108820 fscaleb.s f0, f1, f2");
- COMPARE(fscaleb_d(f3, f4, f5), "01111483 fscaleb.d f3, f4, f5");
+ COMPARE(fscaleb_s(f0, f1, f2), "01108820 fscaleb.s f0, f1, f2");
+ COMPARE(fscaleb_d(f3, f4, f5), "01111483 fscaleb.d f3, f4, f5");
- COMPARE(fcopysign_s(f6, f7, f8), "0112a0e6 fcopysign.s f6, f7, f8");
+ COMPARE(fcopysign_s(f6, f7, f8), "0112a0e6 fcopysign.s f6, f7, f8");
COMPARE(fcopysign_d(f9, f10, f12),
- "01133149 fcopysign.d f9, f10, f12");
+ "01133149 fcopysign.d f9, f10, f12");
VERIFY_RUN();
}
@@ -775,64 +837,64 @@ TEST(TypeOp17) {
TEST(TypeOp22) {
SET_UP();
- COMPARE(clz_w(a3, a0), "00001487 clz.w a3, a0");
- COMPARE(ctz_w(a0, a1), "00001ca4 ctz.w a0, a1");
- COMPARE(clz_d(a2, a3), "000024e6 clz.d a2, a3");
- COMPARE(ctz_d(a4, a5), "00002d28 ctz.d a4, a5");
+ COMPARE(clz_w(a3, a0), "00001487 clz.w a3, a0");
+ COMPARE(ctz_w(a0, a1), "00001ca4 ctz.w a0, a1");
+ COMPARE(clz_d(a2, a3), "000024e6 clz.d a2, a3");
+ COMPARE(ctz_d(a4, a5), "00002d28 ctz.d a4, a5");
- COMPARE(clo_w(a0, a1), "000010a4 clo.w a0, a1");
- COMPARE(cto_w(a2, a3), "000018e6 cto.w a2, a3");
- COMPARE(clo_d(a4, a5), "00002128 clo.d a4, a5");
- COMPARE(cto_d(a6, a7), "0000296a cto.d a6, a7");
+ COMPARE(clo_w(a0, a1), "000010a4 clo.w a0, a1");
+ COMPARE(cto_w(a2, a3), "000018e6 cto.w a2, a3");
+ COMPARE(clo_d(a4, a5), "00002128 clo.d a4, a5");
+ COMPARE(cto_d(a6, a7), "0000296a cto.d a6, a7");
- COMPARE(revb_2h(a6, a7), "0000316a revb.2h a6, a7");
- COMPARE(revb_4h(t0, t1), "000035ac revb.4h t0, t1");
- COMPARE(revb_2w(t2, t3), "000039ee revb.2w t2, t3");
- COMPARE(revb_d(t4, t5), "00003e30 revb.d t4, t5");
+ COMPARE(revb_2h(a6, a7), "0000316a revb.2h a6, a7");
+ COMPARE(revb_4h(t0, t1), "000035ac revb.4h t0, t1");
+ COMPARE(revb_2w(t2, t3), "000039ee revb.2w t2, t3");
+ COMPARE(revb_d(t4, t5), "00003e30 revb.d t4, t5");
- COMPARE(revh_2w(a0, a1), "000040a4 revh.2w a0, a1");
- COMPARE(revh_d(a2, a3), "000044e6 revh.d a2, a3");
+ COMPARE(revh_2w(a0, a1), "000040a4 revh.2w a0, a1");
+ COMPARE(revh_d(a2, a3), "000044e6 revh.d a2, a3");
- COMPARE(bitrev_4b(a4, a5), "00004928 bitrev.4b a4, a5");
- COMPARE(bitrev_8b(a6, a7), "00004d6a bitrev.8b a6, a7");
+ COMPARE(bitrev_4b(a4, a5), "00004928 bitrev.4b a4, a5");
+ COMPARE(bitrev_8b(a6, a7), "00004d6a bitrev.8b a6, a7");
COMPARE(bitrev_w(t0, t1), "000051ac bitrev.w t0, t1");
COMPARE(bitrev_d(t2, t3), "000055ee bitrev.d t2, t3");
- COMPARE(ext_w_b(t4, t5), "00005e30 ext.w.b t4, t5");
- COMPARE(ext_w_h(a0, a1), "000058a4 ext.w.h a0, a1");
+ COMPARE(ext_w_b(t4, t5), "00005e30 ext.w.b t4, t5");
+ COMPARE(ext_w_h(a0, a1), "000058a4 ext.w.h a0, a1");
- COMPARE(fabs_s(f2, f3), "01140462 fabs.s f2, f3");
- COMPARE(fabs_d(f0, f0), "01140800 fabs.d f0, f0");
+ COMPARE(fabs_s(f2, f3), "01140462 fabs.s f2, f3");
+ COMPARE(fabs_d(f0, f0), "01140800 fabs.d f0, f0");
- COMPARE(fneg_s(f0, f1), "01141420 fneg.s f0, f1");
- COMPARE(fneg_d(f0, f0), "01141800 fneg.d f0, f0");
+ COMPARE(fneg_s(f0, f1), "01141420 fneg.s f0, f1");
+ COMPARE(fneg_d(f0, f0), "01141800 fneg.d f0, f0");
- COMPARE(fsqrt_s(f4, f5), "011444a4 fsqrt.s f4, f5");
- COMPARE(fsqrt_d(f0, f0), "01144800 fsqrt.d f0, f0");
+ COMPARE(fsqrt_s(f4, f5), "011444a4 fsqrt.s f4, f5");
+ COMPARE(fsqrt_d(f0, f0), "01144800 fsqrt.d f0, f0");
- COMPARE(fmov_s(f6, f7), "011494e6 fmov.s f6, f7");
- COMPARE(fmov_d(f0, f1), "01149820 fmov.d f0, f1");
- COMPARE(fmov_d(f1, f0), "01149801 fmov.d f1, f0");
+ COMPARE(fmov_s(f6, f7), "011494e6 fmov.s f6, f7");
+ COMPARE(fmov_d(f0, f1), "01149820 fmov.d f0, f1");
+ COMPARE(fmov_d(f1, f0), "01149801 fmov.d f1, f0");
- COMPARE(movgr2fr_d(f0, t6), "0114aa40 movgr2fr.d f0, t6");
- COMPARE(movgr2fr_d(f1, t6), "0114aa41 movgr2fr.d f1, t6");
+ COMPARE(movgr2fr_d(f0, t6), "0114aa40 movgr2fr.d f0, t6");
+ COMPARE(movgr2fr_d(f1, t6), "0114aa41 movgr2fr.d f1, t6");
- COMPARE(movgr2fr_w(f30, a3), "0114a4fe movgr2fr.w f30, a3");
- COMPARE(movgr2fr_w(f30, a0), "0114a49e movgr2fr.w f30, a0");
+ COMPARE(movgr2fr_w(f30, a3), "0114a4fe movgr2fr.w f30, a3");
+ COMPARE(movgr2fr_w(f30, a0), "0114a49e movgr2fr.w f30, a0");
- COMPARE(movgr2frh_w(f30, t6), "0114ae5e movgr2frh.w f30, t6");
- COMPARE(movgr2frh_w(f0, a3), "0114ace0 movgr2frh.w f0, a3");
+ COMPARE(movgr2frh_w(f30, t6), "0114ae5e movgr2frh.w f30, t6");
+ COMPARE(movgr2frh_w(f0, a3), "0114ace0 movgr2frh.w f0, a3");
- COMPARE(movfr2gr_s(a3, f30), "0114b7c7 movfr2gr.s a3, f30");
+ COMPARE(movfr2gr_s(a3, f30), "0114b7c7 movfr2gr.s a3, f30");
- COMPARE(movfr2gr_d(a6, f30), "0114bbca movfr2gr.d a6, f30");
- COMPARE(movfr2gr_d(t7, f30), "0114bbd3 movfr2gr.d t7, f30");
+ COMPARE(movfr2gr_d(a6, f30), "0114bbca movfr2gr.d a6, f30");
+ COMPARE(movfr2gr_d(t7, f30), "0114bbd3 movfr2gr.d t7, f30");
- COMPARE(movfrh2gr_s(a5, f0), "0114bc09 movfrh2gr.s a5, f0");
- COMPARE(movfrh2gr_s(a4, f0), "0114bc08 movfrh2gr.s a4, f0");
+ COMPARE(movfrh2gr_s(a5, f0), "0114bc09 movfrh2gr.s a5, f0");
+ COMPARE(movfrh2gr_s(a4, f0), "0114bc08 movfrh2gr.s a4, f0");
- COMPARE(movgr2fcsr(a2), "0114c0c0 movgr2fcsr fcsr, a2");
- COMPARE(movfcsr2gr(a4), "0114c808 movfcsr2gr a4, fcsr");
+ COMPARE(movgr2fcsr(a2), "0114c0c0 movgr2fcsr fcsr, a2");
+ COMPARE(movfcsr2gr(a4), "0114c808 movfcsr2gr a4, fcsr");
COMPARE(movfr2cf(FCC0, f0), "0114d000 movfr2cf fcc0, f0");
COMPARE(movcf2fr(f1, FCC1), "0114d421 movcf2fr f1, fcc1");
@@ -843,38 +905,38 @@ TEST(TypeOp22) {
COMPARE(fcvt_s_d(f0, f0), "01191800 fcvt.s.d f0, f0");
COMPARE(fcvt_d_s(f0, f0), "01192400 fcvt.d.s f0, f0");
- COMPARE(ftintrm_w_s(f8, f9), "011a0528 ftintrm.w.s f8, f9");
- COMPARE(ftintrm_w_d(f10, f11), "011a096a ftintrm.w.d f10, f11");
- COMPARE(ftintrm_l_s(f12, f13), "011a25ac ftintrm.l.s f12, f13");
- COMPARE(ftintrm_l_d(f14, f15), "011a29ee ftintrm.l.d f14, f15");
+ COMPARE(ftintrm_w_s(f8, f9), "011a0528 ftintrm.w.s f8, f9");
+ COMPARE(ftintrm_w_d(f10, f11), "011a096a ftintrm.w.d f10, f11");
+ COMPARE(ftintrm_l_s(f12, f13), "011a25ac ftintrm.l.s f12, f13");
+ COMPARE(ftintrm_l_d(f14, f15), "011a29ee ftintrm.l.d f14, f15");
- COMPARE(ftintrp_w_s(f16, f17), "011a4630 ftintrp.w.s f16, f17");
- COMPARE(ftintrp_w_d(f18, f19), "011a4a72 ftintrp.w.d f18, f19");
- COMPARE(ftintrp_l_s(f20, f21), "011a66b4 ftintrp.l.s f20, f21");
- COMPARE(ftintrp_l_d(f0, f1), "011a6820 ftintrp.l.d f0, f1");
+ COMPARE(ftintrp_w_s(f16, f17), "011a4630 ftintrp.w.s f16, f17");
+ COMPARE(ftintrp_w_d(f18, f19), "011a4a72 ftintrp.w.d f18, f19");
+ COMPARE(ftintrp_l_s(f20, f21), "011a66b4 ftintrp.l.s f20, f21");
+ COMPARE(ftintrp_l_d(f0, f1), "011a6820 ftintrp.l.d f0, f1");
- COMPARE(ftintrz_w_s(f30, f4), "011a849e ftintrz.w.s f30, f4");
- COMPARE(ftintrz_w_d(f30, f4), "011a889e ftintrz.w.d f30, f4");
- COMPARE(ftintrz_l_s(f30, f0), "011aa41e ftintrz.l.s f30, f0");
- COMPARE(ftintrz_l_d(f30, f30), "011aabde ftintrz.l.d f30, f30");
+ COMPARE(ftintrz_w_s(f30, f4), "011a849e ftintrz.w.s f30, f4");
+ COMPARE(ftintrz_w_d(f30, f4), "011a889e ftintrz.w.d f30, f4");
+ COMPARE(ftintrz_l_s(f30, f0), "011aa41e ftintrz.l.s f30, f0");
+ COMPARE(ftintrz_l_d(f30, f30), "011aabde ftintrz.l.d f30, f30");
- COMPARE(ftintrne_w_s(f2, f3), "011ac462 ftintrne.w.s f2, f3");
- COMPARE(ftintrne_w_d(f4, f5), "011ac8a4 ftintrne.w.d f4, f5");
- COMPARE(ftintrne_l_s(f6, f7), "011ae4e6 ftintrne.l.s f6, f7");
- COMPARE(ftintrne_l_d(f8, f9), "011ae928 ftintrne.l.d f8, f9");
+ COMPARE(ftintrne_w_s(f2, f3), "011ac462 ftintrne.w.s f2, f3");
+ COMPARE(ftintrne_w_d(f4, f5), "011ac8a4 ftintrne.w.d f4, f5");
+ COMPARE(ftintrne_l_s(f6, f7), "011ae4e6 ftintrne.l.s f6, f7");
+ COMPARE(ftintrne_l_d(f8, f9), "011ae928 ftintrne.l.d f8, f9");
- COMPARE(ftint_w_s(f10, f11), "011b056a ftint.w.s f10, f11");
- COMPARE(ftint_w_d(f12, f13), "011b09ac ftint.w.d f12, f13");
- COMPARE(ftint_l_s(f14, f15), "011b25ee ftint.l.s f14, f15");
- COMPARE(ftint_l_d(f16, f17), "011b2a30 ftint.l.d f16, f17");
+ COMPARE(ftint_w_s(f10, f11), "011b056a ftint.w.s f10, f11");
+ COMPARE(ftint_w_d(f12, f13), "011b09ac ftint.w.d f12, f13");
+ COMPARE(ftint_l_s(f14, f15), "011b25ee ftint.l.s f14, f15");
+ COMPARE(ftint_l_d(f16, f17), "011b2a30 ftint.l.d f16, f17");
- COMPARE(ffint_s_w(f18, f19), "011d1272 ffint.s.w f18, f19");
- COMPARE(ffint_s_l(f20, f21), "011d1ab4 ffint.s.l f20, f21");
- COMPARE(ffint_d_w(f0, f1), "011d2020 ffint.d.w f0, f1");
- COMPARE(ffint_d_l(f2, f3), "011d2862 ffint.d.l f2, f3");
+ COMPARE(ffint_s_w(f18, f19), "011d1272 ffint.s.w f18, f19");
+ COMPARE(ffint_s_l(f20, f21), "011d1ab4 ffint.s.l f20, f21");
+ COMPARE(ffint_d_w(f0, f1), "011d2020 ffint.d.w f0, f1");
+ COMPARE(ffint_d_l(f2, f3), "011d2862 ffint.d.l f2, f3");
- COMPARE(frint_s(f4, f5), "011e44a4 frint.s f4, f5");
- COMPARE(frint_d(f6, f7), "011e48e6 frint.d f6, f7");
+ COMPARE(frint_s(f4, f5), "011e44a4 frint.s f4, f5");
+ COMPARE(frint_d(f6, f7), "011e48e6 frint.d f6, f7");
COMPARE(frecip_s(f8, f9), "01145528 frecip.s f8, f9");
COMPARE(frecip_d(f10, f11), "0114596a frecip.d f10, f11");
@@ -885,8 +947,8 @@ TEST(TypeOp22) {
COMPARE(fclass_s(f16, f17), "01143630 fclass.s f16, f17");
COMPARE(fclass_d(f18, f19), "01143a72 fclass.d f18, f19");
- COMPARE(flogb_s(f20, f21), "011426b4 flogb.s f20, f21");
- COMPARE(flogb_d(f0, f1), "01142820 flogb.d f0, f1");
+ COMPARE(flogb_s(f20, f21), "011426b4 flogb.s f20, f21");
+ COMPARE(flogb_d(f0, f1), "01142820 flogb.d f0, f1");
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 95f1ab91d4..80cf93107c 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -27,16 +27,16 @@
#include <stdlib.h>
-#include "src/init/v8.h"
-
+#include "src/base/vector.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/execution/frames-inl.h"
-#include "src/utils/ostreams.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -45,244 +45,12 @@ namespace internal {
#define __ assm.
TEST(DisasmX64) {
- CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[8192];
Assembler assm(AssemblerOptions{},
ExternalAssemblerBuffer(buffer, sizeof buffer));
-
- // Short immediate instructions
- __ addq(rax, Immediate(12345678));
- __ orq(rax, Immediate(12345678));
- __ subq(rax, Immediate(12345678));
- __ xorq(rax, Immediate(12345678));
- __ andq(rax, Immediate(12345678));
-
- // ---- This one caused crash
- __ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4]
-
- // ---- All instructions that I can think of
- __ addq(rdx, rbx);
- __ addq(rdx, Operand(rbx, 0));
- __ addq(rdx, Operand(rbx, 16));
- __ addq(rdx, Operand(rbx, 1999));
- __ addq(rdx, Operand(rbx, -4));
- __ addq(rdx, Operand(rbx, -1999));
- __ addq(rdx, Operand(rsp, 0));
- __ addq(rdx, Operand(rsp, 16));
- __ addq(rdx, Operand(rsp, 1999));
- __ addq(rdx, Operand(rsp, -4));
- __ addq(rdx, Operand(rsp, -1999));
- __ nop();
- __ addq(rsi, Operand(rcx, times_4, 0));
- __ addq(rsi, Operand(rcx, times_4, 24));
- __ addq(rsi, Operand(rcx, times_4, -4));
- __ addq(rsi, Operand(rcx, times_4, -1999));
- __ nop();
- __ addq(rdi, Operand(rbp, rcx, times_4, 0));
- __ addq(rdi, Operand(rbp, rcx, times_4, 12));
- __ addq(rdi, Operand(rbp, rcx, times_4, -8));
- __ addq(rdi, Operand(rbp, rcx, times_4, -3999));
- __ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
-
- __ bswapl(rax);
- __ bswapq(rdi);
- __ bsrl(rax, r15);
- __ bsrl(r9, Operand(rcx, times_8, 91919));
-
- __ nop();
- __ addq(rbx, Immediate(12));
- __ nop();
- __ nop();
- __ andq(rdx, Immediate(3));
- __ andq(rdx, Operand(rsp, 4));
- __ cmpq(rdx, Immediate(3));
- __ cmpq(rdx, Operand(rsp, 4));
- __ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
- __ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
- __ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
- __ orq(rdx, Immediate(3));
- __ xorq(rdx, Immediate(3));
- __ nop();
- __ cpuid();
- __ movsxbl(rdx, Operand(rcx, 0));
- __ movsxbq(rdx, Operand(rcx, 0));
- __ movsxwl(rdx, Operand(rcx, 0));
- __ movsxwq(rdx, Operand(rcx, 0));
- __ movzxbl(rdx, Operand(rcx, 0));
- __ movzxwl(rdx, Operand(rcx, 0));
- __ movzxbq(rdx, Operand(rcx, 0));
- __ movzxwq(rdx, Operand(rcx, 0));
-
- __ nop();
- __ imulq(rdx, rcx);
- __ shld(rdx, rcx);
- __ shrd(rdx, rcx);
- __ shlq(Operand(rdi, rax, times_4, 100), Immediate(1));
- __ shlq(Operand(rdi, rax, times_4, 100), Immediate(6));
- __ shlq(Operand(r15, 0), Immediate(1));
- __ shlq(Operand(r15, 0), Immediate(6));
- __ shlq_cl(Operand(r15, 0));
- __ shlq_cl(Operand(r15, 0));
- __ shlq_cl(Operand(rdi, rax, times_4, 100));
- __ shlq_cl(Operand(rdi, rax, times_4, 100));
- __ shlq(rdx, Immediate(1));
- __ shlq(rdx, Immediate(6));
- __ shll(Operand(rdi, rax, times_4, 100), Immediate(1));
- __ shll(Operand(rdi, rax, times_4, 100), Immediate(6));
- __ shll(Operand(r15, 0), Immediate(1));
- __ shll(Operand(r15, 0), Immediate(6));
- __ shll_cl(Operand(r15, 0));
- __ shll_cl(Operand(r15, 0));
- __ shll_cl(Operand(rdi, rax, times_4, 100));
- __ shll_cl(Operand(rdi, rax, times_4, 100));
- __ shll(rdx, Immediate(1));
- __ shll(rdx, Immediate(6));
- __ btq(Operand(rdx, 0), rcx);
- __ btsq(Operand(rdx, 0), rcx);
- __ btsq(Operand(rbx, rcx, times_4, 0), rcx);
- __ btsq(rcx, Immediate(13));
- __ btrq(rcx, Immediate(13));
- __ nop();
- __ pushq(Immediate(12));
- __ pushq(Immediate(23456));
- __ pushq(rcx);
- __ pushq(rsi);
- __ pushq(Operand(rbp, StandardFrameConstants::kFunctionOffset));
- __ pushq(Operand(rbx, rcx, times_4, 0));
- __ pushq(Operand(rbx, rcx, times_4, 0));
- __ pushq(Operand(rbx, rcx, times_4, 10000));
- __ popq(rdx);
- __ popq(rax);
- __ popq(Operand(rbx, rcx, times_4, 0));
- __ nop();
-
- __ addq(rdx, Operand(rsp, 16));
- __ addq(rdx, rcx);
- __ movb(rdx, Operand(rcx, 0));
- __ movb(rcx, Immediate(6));
- __ movb(Operand(rsp, 16), rdx);
- __ movw(Operand(rsp, 16), rdx);
- __ nop();
- __ movsxwq(rdx, Operand(rsp, 12));
- __ movsxbq(rdx, Operand(rsp, 12));
- __ movsxlq(rdx, Operand(rsp, 12));
- __ movzxwq(rdx, Operand(rsp, 12));
- __ movzxbq(rdx, Operand(rsp, 12));
- __ nop();
- __ movq(rdx, Immediate(1234567));
- __ movq(rdx, Operand(rsp, 12));
- __ movq(Operand(rbx, rcx, times_4, 10000), Immediate(12345));
- __ movq(Operand(rbx, rcx, times_4, 10000), rdx);
- __ nop();
- __ decb(rdx);
- __ decb(Operand(rax, 10));
- __ decb(Operand(rbx, rcx, times_4, 10000));
- __ decq(rdx);
- __ cdq();
-
- __ repstosl();
- __ repstosq();
-
- __ nop();
- __ idivq(rdx);
- __ mull(rdx);
- __ mulq(rdx);
-
- __ negb(rdx);
- __ negb(r10);
- __ negw(rdx);
- __ negl(rdx);
- __ negq(rdx);
- __ negb(Operand(rsp, 12));
- __ negw(Operand(rsp, 12));
- __ negl(Operand(rsp, 12));
- __ negb(Operand(rsp, 12));
-
- __ notq(rdx);
- __ testq(Operand(rbx, rcx, times_4, 10000), rdx);
-
- __ imulq(rdx, rcx, Immediate(12));
- __ imulq(rdx, rcx, Immediate(1000));
- __ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
- __ imulq(rdx, Operand(rbx, rcx, times_4, 10000), Immediate(12));
- __ imulq(rdx, Operand(rbx, rcx, times_4, 10000), Immediate(1000));
- __ imull(r15, rcx, Immediate(12));
- __ imull(r15, rcx, Immediate(1000));
- __ imull(r15, Operand(rbx, rcx, times_4, 10000));
- __ imull(r15, Operand(rbx, rcx, times_4, 10000), Immediate(12));
- __ imull(r15, Operand(rbx, rcx, times_4, 10000), Immediate(1000));
-
- __ incq(rdx);
- __ incq(Operand(rbx, rcx, times_4, 10000));
- __ pushq(Operand(rbx, rcx, times_4, 10000));
- __ popq(Operand(rbx, rcx, times_4, 10000));
- __ jmp(Operand(rbx, rcx, times_4, 10000));
-
- __ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
- __ orq(rdx, Immediate(12345));
- __ orq(rdx, Operand(rbx, rcx, times_4, 10000));
-
- __ nop();
-
- __ rclq(rdx, Immediate(1));
- __ rclq(rdx, Immediate(7));
- __ rcrq(rdx, Immediate(1));
- __ rcrq(rdx, Immediate(7));
- __ sarq(rdx, Immediate(1));
- __ sarq(rdx, Immediate(6));
- __ sarq_cl(rdx);
- __ sbbq(rdx, rbx);
- __ shld(rdx, rbx);
- __ shlq(rdx, Immediate(1));
- __ shlq(rdx, Immediate(6));
- __ shlq_cl(rdx);
- __ shrd(rdx, rbx);
- __ shrq(rdx, Immediate(1));
- __ shrq(rdx, Immediate(7));
- __ shrq_cl(rdx);
-
-
- // Immediates
-
- __ addq(rbx, Immediate(12));
- __ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
-
- __ andq(rbx, Immediate(12345));
-
- __ cmpq(rbx, Immediate(12345));
- __ cmpq(rbx, Immediate(12));
- __ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
- __ cmpb(rax, Immediate(100));
-
- __ orq(rbx, Immediate(12345));
-
- __ subq(rbx, Immediate(12));
- __ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
-
- __ xorq(rbx, Immediate(12345));
-
- __ imulq(rdx, rcx, Immediate(12));
- __ imulq(rdx, rcx, Immediate(1000));
-
- __ cld();
-
- __ subq(rdx, Operand(rbx, rcx, times_4, 10000));
- __ subq(rdx, rbx);
-
- __ testq(rdx, Immediate(12345));
- __ testq(Operand(rbx, rcx, times_8, 10000), rdx);
- __ testb(Operand(rcx, rbx, times_2, 1000), rdx);
- __ testb(Operand(rax, -20), Immediate(0x9A));
- __ nop();
-
- __ xorq(rdx, Immediate(12345));
- __ xorq(rdx, Operand(rbx, rcx, times_8, 10000));
- __ hlt();
- __ int3();
- __ ret(0);
- __ ret(8);
+ // Some instructions are tested in DisasmX64CheckOutput.
// Calls
@@ -344,181 +112,6 @@ TEST(DisasmX64) {
__ j(less_equal, &Ljcc);
__ j(greater, &Ljcc);
- // 0xD9 instructions
- __ nop();
-
- __ fld(1);
- __ fld1();
- __ fldz();
- __ fldpi();
- __ fabs();
- __ fchs();
- __ fprem();
- __ fprem1();
- __ fincstp();
- __ ftst();
- __ fxch(3);
- __ fld_s(Operand(rbx, rcx, times_4, 10000));
- __ fstp_s(Operand(rbx, rcx, times_4, 10000));
- __ ffree(3);
- __ fld_d(Operand(rbx, rcx, times_4, 10000));
- __ fstp_d(Operand(rbx, rcx, times_4, 10000));
- __ nop();
-
- __ fild_s(Operand(rbx, rcx, times_4, 10000));
- __ fistp_s(Operand(rbx, rcx, times_4, 10000));
- __ fild_d(Operand(rbx, rcx, times_4, 10000));
- __ fistp_d(Operand(rbx, rcx, times_4, 10000));
- __ fnstsw_ax();
- __ nop();
- __ fadd(3);
- __ fsub(3);
- __ fmul(3);
- __ fdiv(3);
-
- __ faddp(3);
- __ fsubp(3);
- __ fmulp(3);
- __ fdivp(3);
- __ fcompp();
- __ fwait();
- __ frndint();
- __ fninit();
- __ nop();
-
- // SSE instruction
- {
- // Move operation
- __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
- __ cvttss2si(rdx, xmm1);
- __ cvtqsi2ss(xmm1, Operand(rbx, rcx, times_4, 10000));
- __ cvtqsi2ss(xmm1, rdx);
- __ cvttps2dq(xmm0, xmm1);
- __ cvttps2dq(xmm0, Operand(rbx, rcx, times_4, 10000));
- __ movaps(xmm0, xmm1);
- __ movaps(xmm0, Operand(rbx, rcx, times_4, 10000));
- __ movdqa(xmm0, Operand(rsp, 12));
- __ movdqa(Operand(rsp, 12), xmm0);
- __ movdqu(xmm0, Operand(rsp, 12));
- __ movdqu(Operand(rsp, 12), xmm0);
- __ movdqu(xmm1, xmm0);
- __ movhlps(xmm5, xmm1);
- __ movlps(xmm8, Operand(rbx, rcx, times_4, 10000));
- __ movlps(Operand(rbx, rcx, times_4, 10000), xmm9);
- __ movlhps(xmm5, xmm1);
- __ movhps(xmm8, Operand(rbx, rcx, times_4, 10000));
- __ movhps(Operand(rbx, rcx, times_4, 10000), xmm9);
- __ shufps(xmm0, xmm9, 0x0);
-
- __ ucomiss(xmm0, xmm1);
- __ ucomiss(xmm0, Operand(rbx, rcx, times_4, 10000));
-
- __ movmskps(rdx, xmm9);
-
-#define EMIT_SSE_INSTR(instruction, notUsed1, notUsed2) \
- __ instruction(xmm1, xmm0); \
- __ instruction(xmm1, Operand(rbx, rcx, times_4, 10000));
- SSE_BINOP_INSTRUCTION_LIST(EMIT_SSE_INSTR)
- SSE_UNOP_INSTRUCTION_LIST(EMIT_SSE_INSTR)
-#undef EMIT_SSE_INSTR
-
-#define EMIT_SSE_INSTR(instruction, notUsed1, notUsed2, notUse3) \
- __ instruction(xmm1, xmm0); \
- __ instruction(xmm1, Operand(rbx, rcx, times_4, 10000));
- SSE_INSTRUCTION_LIST_SS(EMIT_SSE_INSTR)
-#undef EMIT_SSE_INSTR
- }
-
- // SSE2 instructions
- {
- __ cvtdq2pd(xmm3, xmm4);
- __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
- __ cvttsd2si(rdx, xmm1);
- __ cvttsd2siq(rdx, xmm1);
- __ cvttsd2siq(rdx, Operand(rbx, rcx, times_4, 10000));
- __ cvtlsi2sd(xmm1, Operand(rbx, rcx, times_4, 10000));
- __ cvtlsi2sd(xmm1, rdx);
- __ cvtqsi2sd(xmm1, Operand(rbx, rcx, times_4, 10000));
- __ cvtqsi2sd(xmm1, rdx);
- __ cvtss2sd(xmm1, xmm9);
- __ cvtss2sd(xmm1, Operand(rbx, rcx, times_4, 10000));
- __ cvtsd2si(rdx, xmm9);
- __ cvtsd2siq(rdx, xmm9);
-
- __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
- __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
- // 128 bit move instructions.
- __ movupd(xmm0, Operand(rbx, rcx, times_4, 10000));
- __ movupd(Operand(rbx, rcx, times_4, 10000), xmm0);
- __ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
- __ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
- __ movdqa(xmm0, xmm1);
-
- __ ucomisd(xmm0, xmm1);
- __ ucomisd(xmm8, Operand(rbx, rdx, times_4, 10000));
-
- __ cmpltsd(xmm3, xmm11);
-
- __ movmskpd(rdx, xmm9);
- __ pmovmskb(rdx, xmm9);
-
- __ pcmpeqd(xmm1, xmm0);
-
- __ punpckldq(xmm1, xmm11);
- __ punpckldq(xmm5, Operand(rdx, 4));
- __ punpckhdq(xmm8, xmm15);
-
- __ pshuflw(xmm2, xmm4, 3);
- __ pshufhw(xmm1, xmm9, 6);
-
-#define EMIT_SSE2_INSTR(instruction, notUsed1, notUsed2, notUsed3) \
- __ instruction(xmm5, xmm1); \
- __ instruction(xmm5, Operand(rdx, 4));
-
- SSE2_INSTRUCTION_LIST(EMIT_SSE2_INSTR)
- SSE2_UNOP_INSTRUCTION_LIST(EMIT_SSE2_INSTR)
- SSE2_INSTRUCTION_LIST_SD(EMIT_SSE2_INSTR)
-#undef EMIT_SSE2_INSTR
-
-#define EMIT_SSE2_SHIFT_IMM(instruction, notUsed1, notUsed2, notUsed3, \
- notUsed4) \
- __ instruction(xmm3, 0xA3);
- SSE2_INSTRUCTION_LIST_SHIFT_IMM(EMIT_SSE2_SHIFT_IMM)
-#undef EMIT_SSE2_SHIFT_IMM
- }
-
- // cmov.
- {
- __ cmovq(overflow, rax, Operand(rax, 0));
- __ cmovq(no_overflow, rax, Operand(rax, 1));
- __ cmovq(below, rax, Operand(rax, 2));
- __ cmovq(above_equal, rax, Operand(rax, 3));
- __ cmovq(equal, rax, Operand(rbx, 0));
- __ cmovq(not_equal, rax, Operand(rbx, 1));
- __ cmovq(below_equal, rax, Operand(rbx, 2));
- __ cmovq(above, rax, Operand(rbx, 3));
- __ cmovq(sign, rax, Operand(rcx, 0));
- __ cmovq(not_sign, rax, Operand(rcx, 1));
- __ cmovq(parity_even, rax, Operand(rcx, 2));
- __ cmovq(parity_odd, rax, Operand(rcx, 3));
- __ cmovq(less, rax, Operand(rdx, 0));
- __ cmovq(greater_equal, rax, Operand(rdx, 1));
- __ cmovq(less_equal, rax, Operand(rdx, 2));
- __ cmovq(greater, rax, Operand(rdx, 3));
- }
-
- {
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatureScope scope(&assm, SSE3);
- __ haddps(xmm1, xmm0);
- __ haddps(xmm1, Operand(rbx, rcx, times_4, 10000));
- __ lddqu(xmm1, Operand(rdx, 4));
- __ movddup(xmm1, Operand(rax, 5));
- __ movddup(xmm1, xmm2);
- __ movshdup(xmm1, xmm2);
- }
- }
-
#define EMIT_SSE34_INSTR(instruction, notUsed1, notUsed2, notUsed3, notUsed4) \
__ instruction(xmm5, xmm1); \
__ instruction(xmm5, Operand(rdx, 4));
@@ -529,16 +122,6 @@ TEST(DisasmX64) {
__ instruction(Operand(rax, 10), xmm0, 1);
{
- if (CpuFeatures::IsSupported(SSSE3)) {
- CpuFeatureScope scope(&assm, SSSE3);
- __ palignr(xmm5, xmm1, 5);
- __ palignr(xmm5, Operand(rdx, 4), 5);
- SSSE3_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
- SSSE3_UNOP_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
- }
- }
-
- {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(&assm, SSE4_1);
__ insertps(xmm5, xmm1, 123);
@@ -724,6 +307,8 @@ TEST(DisasmX64) {
__ vxorps(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vhaddps(xmm0, xmm1, xmm9);
__ vhaddps(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ vhaddps(ymm0, ymm1, ymm2);
+ __ vhaddps(ymm0, ymm1, Operand(rbx, rcx, times_4, 10000));
__ vpcmpeqd(xmm0, xmm15, xmm5);
__ vpcmpeqd(xmm15, xmm0, Operand(rbx, rcx, times_4, 10000));
@@ -1057,31 +642,639 @@ TEST(DisasmX64) {
#endif
}
-TEST(DisasmX64YMMRegister) {
- if (!CpuFeatures::IsSupported(AVX)) return;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope scope(isolate);
- v8::internal::byte buffer[8192];
- Assembler assm(AssemblerOptions{},
- ExternalAssemblerBuffer(buffer, sizeof buffer));
- CpuFeatureScope fscope(&assm, AVX);
+constexpr int kAssemblerBufferSize = 8192;
- __ vmovdqa(ymm0, ymm1);
+// Helper to package up all the required classes for disassembling into a
+// buffer using |InstructionDecode|.
+struct DisassemblerTester {
+ DisassemblerTester()
+ : assm_(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_, sizeof(buffer_))),
+ disasm(converter_) {}
- base::Vector<char> actual = base::Vector<char>::New(37);
- disasm::NameConverter converter;
- disasm::Disassembler disassembler(converter);
- disassembler.InstructionDecode(actual, buffer);
-#ifdef OBJECT_PRINT
- fprintf(stdout, "Disassembled buffer: %s\n", actual.begin());
-#endif
+ std::string InstructionDecode() {
+ disasm.InstructionDecode(disasm_buffer, buffer_ + prev_offset);
+ return std::string{disasm_buffer.begin()};
+ }
+
+ int pc_offset() { return assm_.pc_offset(); }
+
+ Assembler* assm() { return &assm_; }
+
+ v8::internal::byte buffer_[kAssemblerBufferSize];
+ Assembler assm_;
+ disasm::NameConverter converter_;
+ disasm::Disassembler disasm;
+ base::EmbeddedVector<char, 128> disasm_buffer;
+ int prev_offset = 0;
+};
+
+// Helper macro to compare the disassembly of an assembler function call with
+// the expected disassembly output. We reuse |Assembler|, so we need to keep
+// track of the offset into |buffer| which the Assembler has used, and
+// disassemble the instruction at that offset.
+// Requires a DisassemblerTester named t.
+#define COMPARE(str, ASM) \
+ t.prev_offset = t.pc_offset(); \
+ t.assm_.ASM; \
+ CHECK_EQ(str, t.InstructionDecode());
+
+// Tests that compare the disassembly output with an expected
+// string.
+UNINITIALIZED_TEST(DisasmX64CheckOutput) {
+ DisassemblerTester t;
+
+ // Short immediate instructions
+ COMPARE("48054e61bc00 REX.W add rax,0xbc614e",
+ addq(rax, Immediate(12345678)));
+ COMPARE("480d4e61bc00 REX.W or rax,0xbc614e",
+ orq(rax, Immediate(12345678)));
+ COMPARE("482d4e61bc00 REX.W sub rax,0xbc614e",
+ subq(rax, Immediate(12345678)));
+ COMPARE("48354e61bc00 REX.W xor rax,0xbc614e",
+ xorq(rax, Immediate(12345678)));
+ COMPARE("48254e61bc00 REX.W and rax,0xbc614e",
+ andq(rax, Immediate(12345678)));
+ COMPARE("488b1c4c REX.W movq rbx,[rsp+rcx*2]",
+          movq(rbx, Operand(rsp, rcx, times_2, 0)));  // [rsp+rcx*2]
+ COMPARE("4803d3 REX.W addq rdx,rbx", addq(rdx, rbx));
+ COMPARE("480313 REX.W addq rdx,[rbx]",
+ addq(rdx, Operand(rbx, 0)));
+ COMPARE("48035310 REX.W addq rdx,[rbx+0x10]",
+ addq(rdx, Operand(rbx, 16)));
+ COMPARE("480393cf070000 REX.W addq rdx,[rbx+0x7cf]",
+ addq(rdx, Operand(rbx, 1999)));
+ COMPARE("480353fc REX.W addq rdx,[rbx-0x4]",
+ addq(rdx, Operand(rbx, -4)));
+ COMPARE("48039331f8ffff REX.W addq rdx,[rbx-0x7cf]",
+ addq(rdx, Operand(rbx, -1999)));
+ COMPARE("48031424 REX.W addq rdx,[rsp]",
+ addq(rdx, Operand(rsp, 0)));
+ COMPARE("4803542410 REX.W addq rdx,[rsp+0x10]",
+ addq(rdx, Operand(rsp, 16)));
+ COMPARE("48039424cf070000 REX.W addq rdx,[rsp+0x7cf]",
+ addq(rdx, Operand(rsp, 1999)));
+ COMPARE("48035424fc REX.W addq rdx,[rsp-0x4]",
+ addq(rdx, Operand(rsp, -4)));
+ COMPARE("4803942431f8ffff REX.W addq rdx,[rsp-0x7cf]",
+ addq(rdx, Operand(rsp, -1999)));
+ COMPARE("4803348d00000000 REX.W addq rsi,[rcx*4+0x0]",
+ addq(rsi, Operand(rcx, times_4, 0)));
+ COMPARE("4803348d18000000 REX.W addq rsi,[rcx*4+0x18]",
+ addq(rsi, Operand(rcx, times_4, 24)));
+ COMPARE("4803348dfcffffff REX.W addq rsi,[rcx*4-0x4]",
+ addq(rsi, Operand(rcx, times_4, -4)));
+ COMPARE("4803348d31f8ffff REX.W addq rsi,[rcx*4-0x7cf]",
+ addq(rsi, Operand(rcx, times_4, -1999)));
+ COMPARE("48037c8d00 REX.W addq rdi,[rbp+rcx*4+0x0]",
+ addq(rdi, Operand(rbp, rcx, times_4, 0)));
+ COMPARE("48037c8d0c REX.W addq rdi,[rbp+rcx*4+0xc]",
+ addq(rdi, Operand(rbp, rcx, times_4, 12)));
+ COMPARE("48037c8df8 REX.W addq rdi,[rbp+rcx*4-0x8]",
+ addq(rdi, Operand(rbp, rcx, times_4, -8)));
+ COMPARE("4803bc8d61f0ffff REX.W addq rdi,[rbp+rcx*4-0xf9f]",
+ addq(rdi, Operand(rbp, rcx, times_4, -3999)));
+ COMPARE("4883448d0c0c REX.W addq [rbp+rcx*4+0xc],0xc",
+ addq(Operand(rbp, rcx, times_4, 12), Immediate(12)));
+
+ COMPARE("400fc8 bswapl rax", bswapl(rax));
+ COMPARE("480fcf REX.W bswapq rdi", bswapq(rdi));
+ COMPARE("410fbdc7 bsrl rax,r15", bsrl(rax, r15));
+ COMPARE("440fbd0ccd0f670100 bsrl r9,[rcx*8+0x1670f]",
+ bsrl(r9, Operand(rcx, times_8, 91919)));
+
+ COMPARE("90 nop", nop());
+ COMPARE("4883c30c REX.W addq rbx,0xc", addq(rbx, Immediate(12)));
+ COMPARE("4883e203 REX.W andq rdx,0x3", andq(rdx, Immediate(3)));
+ COMPARE("4823542404 REX.W andq rdx,[rsp+0x4]",
+ andq(rdx, Operand(rsp, 4)));
+ COMPARE("4883fa03 REX.W cmpq rdx,0x3", cmpq(rdx, Immediate(3)));
+ COMPARE("483b542404 REX.W cmpq rdx,[rsp+0x4]",
+ cmpq(rdx, Operand(rsp, 4)));
+ COMPARE("48817c8d00e8030000 REX.W cmpq [rbp+rcx*4+0x0],0x3e8",
+ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000)));
+ COMPARE("3a5c4d00 cmpb bl,[rbp+rcx*2+0x0]",
+ cmpb(rbx, Operand(rbp, rcx, times_2, 0)));
+ COMPARE("385c4d00 cmpb [rbp+rcx*2+0x0],bl",
+ cmpb(Operand(rbp, rcx, times_2, 0), rbx));
+ COMPARE("4883ca03 REX.W orq rdx,0x3", orq(rdx, Immediate(3)));
+ COMPARE("4883f203 REX.W xorq rdx,0x3", xorq(rdx, Immediate(3)));
+ COMPARE("90 nop", nop());
+ COMPARE("0fa2 cpuid", cpuid());
+ COMPARE("0fbe11 movsxbl rdx,[rcx]",
+ movsxbl(rdx, Operand(rcx, 0)));
+ COMPARE("480fbe11 REX.W movsxbq rdx,[rcx]",
+ movsxbq(rdx, Operand(rcx, 0)));
+ COMPARE("0fbf11 movsxwl rdx,[rcx]",
+ movsxwl(rdx, Operand(rcx, 0)));
+ COMPARE("480fbf11 REX.W movsxwq rdx,[rcx]",
+ movsxwq(rdx, Operand(rcx, 0)));
+ COMPARE("0fb611 movzxbl rdx,[rcx]",
+ movzxbl(rdx, Operand(rcx, 0)));
+ COMPARE("0fb711 movzxwl rdx,[rcx]",
+ movzxwl(rdx, Operand(rcx, 0)));
+ COMPARE("0fb611 movzxbl rdx,[rcx]",
+ movzxbq(rdx, Operand(rcx, 0)));
+ COMPARE("0fb711 movzxwl rdx,[rcx]",
+ movzxwq(rdx, Operand(rcx, 0)));
+
+ COMPARE("480fafd1 REX.W imulq rdx,rcx", imulq(rdx, rcx));
+ COMPARE("480fa5ca REX.W shld rdx,rcx,cl", shld(rdx, rcx));
+ COMPARE("480fadca REX.W shrd rdx,rcx,cl", shrd(rdx, rcx));
+ COMPARE("48d1648764 REX.W shlq [rdi+rax*4+0x64], 1",
+ shlq(Operand(rdi, rax, times_4, 100), Immediate(1)));
+ COMPARE("48c164876406 REX.W shlq [rdi+rax*4+0x64], 6",
+ shlq(Operand(rdi, rax, times_4, 100), Immediate(6)));
+ COMPARE("49d127 REX.W shlq [r15], 1",
+ shlq(Operand(r15, 0), Immediate(1)));
+ COMPARE("49c12706 REX.W shlq [r15], 6",
+ shlq(Operand(r15, 0), Immediate(6)));
+ COMPARE("49d327 REX.W shlq [r15], cl",
+ shlq_cl(Operand(r15, 0)));
+ COMPARE("49d327 REX.W shlq [r15], cl",
+ shlq_cl(Operand(r15, 0)));
+ COMPARE("48d3648764 REX.W shlq [rdi+rax*4+0x64], cl",
+ shlq_cl(Operand(rdi, rax, times_4, 100)));
+ COMPARE("48d3648764 REX.W shlq [rdi+rax*4+0x64], cl",
+ shlq_cl(Operand(rdi, rax, times_4, 100)));
+ COMPARE("48d1e2 REX.W shlq rdx, 1", shlq(rdx, Immediate(1)));
+ COMPARE("48c1e206 REX.W shlq rdx, 6", shlq(rdx, Immediate(6)));
+ COMPARE("d1648764 shll [rdi+rax*4+0x64], 1",
+ shll(Operand(rdi, rax, times_4, 100), Immediate(1)));
+ COMPARE("c164876406 shll [rdi+rax*4+0x64], 6",
+ shll(Operand(rdi, rax, times_4, 100), Immediate(6)));
+ COMPARE("41d127 shll [r15], 1",
+ shll(Operand(r15, 0), Immediate(1)));
+ COMPARE("41c12706 shll [r15], 6",
+ shll(Operand(r15, 0), Immediate(6)));
+ COMPARE("41d327 shll [r15], cl", shll_cl(Operand(r15, 0)));
+ COMPARE("41d327 shll [r15], cl", shll_cl(Operand(r15, 0)));
+ COMPARE("d3648764 shll [rdi+rax*4+0x64], cl",
+ shll_cl(Operand(rdi, rax, times_4, 100)));
+ COMPARE("d3648764 shll [rdi+rax*4+0x64], cl",
+ shll_cl(Operand(rdi, rax, times_4, 100)));
+ COMPARE("d1e2 shll rdx, 1", shll(rdx, Immediate(1)));
+ COMPARE("c1e206 shll rdx, 6", shll(rdx, Immediate(6)));
+ COMPARE("480fa30a REX.W bt [rdx],rcx,cl",
+ btq(Operand(rdx, 0), rcx));
+ COMPARE("480fab0a REX.W bts [rdx],rcx",
+ btsq(Operand(rdx, 0), rcx));
+ COMPARE("480fab0c8b REX.W bts [rbx+rcx*4],rcx",
+ btsq(Operand(rbx, rcx, times_4, 0), rcx));
+ COMPARE("480fbae90d REX.W bts rcx,13", btsq(rcx, Immediate(13)));
+ COMPARE("480fbaf10d REX.W btr rcx,13", btrq(rcx, Immediate(13)));
+ COMPARE("6a0c push 0xc", pushq(Immediate(12)));
+ COMPARE("68a05b0000 push 0x5ba0", pushq(Immediate(23456)));
+ COMPARE("51 push rcx", pushq(rcx));
+ COMPARE("56 push rsi", pushq(rsi));
+ COMPARE("ff75f0 push [rbp-0x10]",
+ pushq(Operand(rbp, StandardFrameConstants::kFunctionOffset)));
+ COMPARE("ff348b push [rbx+rcx*4]",
+ pushq(Operand(rbx, rcx, times_4, 0)));
+ COMPARE("ff348b push [rbx+rcx*4]",
+ pushq(Operand(rbx, rcx, times_4, 0)));
+ COMPARE("ffb48b10270000 push [rbx+rcx*4+0x2710]",
+ pushq(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("5a pop rdx", popq(rdx));
+ COMPARE("58 pop rax", popq(rax));
+ COMPARE("8f048b pop [rbx+rcx*4]",
+ popq(Operand(rbx, rcx, times_4, 0)));
+
+ COMPARE("4803542410 REX.W addq rdx,[rsp+0x10]",
+ addq(rdx, Operand(rsp, 16)));
+ COMPARE("4803d1 REX.W addq rdx,rcx", addq(rdx, rcx));
+ COMPARE("8a11 movb dl,[rcx]", movb(rdx, Operand(rcx, 0)));
+ COMPARE("b106 movb cl,6", movb(rcx, Immediate(6)));
+ COMPARE("88542410 movb [rsp+0x10],dl",
+ movb(Operand(rsp, 16), rdx));
+ COMPARE("6689542410 movw [rsp+0x10],rdx",
+ movw(Operand(rsp, 16), rdx));
+ COMPARE("90 nop", nop());
+ COMPARE("480fbf54240c REX.W movsxwq rdx,[rsp+0xc]",
+ movsxwq(rdx, Operand(rsp, 12)));
+ COMPARE("480fbe54240c REX.W movsxbq rdx,[rsp+0xc]",
+ movsxbq(rdx, Operand(rsp, 12)));
+ COMPARE("486354240c REX.W movsxlq rdx,[rsp+0xc]",
+ movsxlq(rdx, Operand(rsp, 12)));
+ COMPARE("0fb754240c movzxwl rdx,[rsp+0xc]",
+ movzxwq(rdx, Operand(rsp, 12)));
+ COMPARE("0fb654240c movzxbl rdx,[rsp+0xc]",
+ movzxbq(rdx, Operand(rsp, 12)));
+ COMPARE("90 nop", nop());
+ COMPARE("48c7c287d61200 REX.W movq rdx,0x12d687",
+ movq(rdx, Immediate(1234567)));
+ COMPARE("488b54240c REX.W movq rdx,[rsp+0xc]",
+ movq(rdx, Operand(rsp, 12)));
+ COMPARE("48c7848b1027000039300000 REX.W movq [rbx+rcx*4+0x2710],0x3039",
+ movq(Operand(rbx, rcx, times_4, 10000), Immediate(12345)));
+ COMPARE("4889948b10270000 REX.W movq [rbx+rcx*4+0x2710],rdx",
+ movq(Operand(rbx, rcx, times_4, 10000), rdx));
+ COMPARE("90 nop", nop());
+ COMPARE("feca decb dl", decb(rdx));
+ COMPARE("fe480a decb [rax+0xa]", decb(Operand(rax, 10)));
+ COMPARE("fe8c8b10270000 decb [rbx+rcx*4+0x2710]",
+ decb(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("48ffca REX.W decq rdx", decq(rdx));
+ COMPARE("99 cdql", cdq());
+
+ COMPARE("f3ab rep stosl", repstosl());
+ COMPARE("f348ab REX.W rep stosq", repstosq());
+
+ COMPARE("48f7fa REX.W idivq rdx", idivq(rdx));
+ COMPARE("f7e2 mull rdx", mull(rdx));
+ COMPARE("48f7e2 REX.W mulq rdx", mulq(rdx));
+
+ COMPARE("f6da negb rdx", negb(rdx));
+ COMPARE("41f6da negb r10", negb(r10));
+ COMPARE("66f7da negw rdx", negw(rdx));
+ COMPARE("f7da negl rdx", negl(rdx));
+ COMPARE("48f7da REX.W negq rdx", negq(rdx));
+ COMPARE("f65c240c negb [rsp+0xc]", negb(Operand(rsp, 12)));
+ COMPARE("66f75c240c negw [rsp+0xc]", negw(Operand(rsp, 12)));
+ COMPARE("f75c240c negl [rsp+0xc]", negl(Operand(rsp, 12)));
+ COMPARE("f65c240c negb [rsp+0xc]", negb(Operand(rsp, 12)));
+
+ COMPARE("48f7d2 REX.W notq rdx", notq(rdx));
+ COMPARE("4885948b10270000 REX.W testq rdx,[rbx+rcx*4+0x2710]",
+ testq(Operand(rbx, rcx, times_4, 10000), rdx));
+
+ COMPARE("486bd10c REX.W imulq rdx,rcx,0xc",
+ imulq(rdx, rcx, Immediate(12)));
+ COMPARE("4869d1e8030000 REX.W imulq rdx,rcx,0x3e8",
+ imulq(rdx, rcx, Immediate(1000)));
+ COMPARE("480faf948b10270000 REX.W imulq rdx,[rbx+rcx*4+0x2710]",
+ imulq(rdx, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("486b948b102700000c REX.W imulq rdx,[rbx+rcx*4+0x2710],0xc",
+ imulq(rdx, Operand(rbx, rcx, times_4, 10000), Immediate(12)));
+ COMPARE("4869948b10270000e8030000 REX.W imulq rdx,[rbx+rcx*4+0x2710],0x3e8",
+ imulq(rdx, Operand(rbx, rcx, times_4, 10000), Immediate(1000)));
+ COMPARE("446bf90c imull r15,rcx,0xc",
+ imull(r15, rcx, Immediate(12)));
+ COMPARE("4469f9e8030000 imull r15,rcx,0x3e8",
+ imull(r15, rcx, Immediate(1000)));
+ COMPARE("440fafbc8b10270000 imull r15,[rbx+rcx*4+0x2710]",
+ imull(r15, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("446bbc8b102700000c imull r15,[rbx+rcx*4+0x2710],0xc",
+ imull(r15, Operand(rbx, rcx, times_4, 10000), Immediate(12)));
+ COMPARE("4469bc8b10270000e8030000 imull r15,[rbx+rcx*4+0x2710],0x3e8",
+ imull(r15, Operand(rbx, rcx, times_4, 10000), Immediate(1000)));
+
+ COMPARE("48ffc2 REX.W incq rdx", incq(rdx));
+ COMPARE("48ff848b10270000 REX.W incq [rbx+rcx*4+0x2710]",
+ incq(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("ffb48b10270000 push [rbx+rcx*4+0x2710]",
+ pushq(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("8f848b10270000 pop [rbx+rcx*4+0x2710]",
+ popq(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("ffa48b10270000 jmp [rbx+rcx*4+0x2710]",
+ jmp(Operand(rbx, rcx, times_4, 10000)));
+
+ COMPARE("488d948b10270000 REX.W leaq rdx,[rbx+rcx*4+0x2710]",
+ leaq(rdx, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("4881ca39300000 REX.W orq rdx,0x3039",
+ orq(rdx, Immediate(12345)));
+ COMPARE("480b948b10270000 REX.W orq rdx,[rbx+rcx*4+0x2710]",
+ orq(rdx, Operand(rbx, rcx, times_4, 10000)));
+
+ COMPARE("48d1d2 REX.W rclq rdx, 1", rclq(rdx, Immediate(1)));
+ COMPARE("48c1d207 REX.W rclq rdx, 7", rclq(rdx, Immediate(7)));
+ COMPARE("48d1da REX.W rcrq rdx, 1", rcrq(rdx, Immediate(1)));
+ COMPARE("48c1da07 REX.W rcrq rdx, 7", rcrq(rdx, Immediate(7)));
+ COMPARE("48d1fa REX.W sarq rdx, 1", sarq(rdx, Immediate(1)));
+ COMPARE("48c1fa06 REX.W sarq rdx, 6", sarq(rdx, Immediate(6)));
+ COMPARE("48d3fa REX.W sarq rdx, cl", sarq_cl(rdx));
+ COMPARE("481bd3 REX.W sbbq rdx,rbx", sbbq(rdx, rbx));
+ COMPARE("480fa5da REX.W shld rdx,rbx,cl", shld(rdx, rbx));
+ COMPARE("48d1e2 REX.W shlq rdx, 1", shlq(rdx, Immediate(1)));
+ COMPARE("48c1e206 REX.W shlq rdx, 6", shlq(rdx, Immediate(6)));
+ COMPARE("48d3e2 REX.W shlq rdx, cl", shlq_cl(rdx));
+ COMPARE("480fadda REX.W shrd rdx,rbx,cl", shrd(rdx, rbx));
+ COMPARE("48d1ea REX.W shrq rdx, 1", shrq(rdx, Immediate(1)));
+ COMPARE("48c1ea07 REX.W shrq rdx, 7", shrq(rdx, Immediate(7)));
+ COMPARE("48d3ea REX.W shrq rdx, cl", shrq_cl(rdx));
+
+ COMPARE("4883c30c REX.W addq rbx,0xc", addq(rbx, Immediate(12)));
+ COMPARE("4883848a102700000c REX.W addq [rdx+rcx*4+0x2710],0xc",
+ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12)));
+ COMPARE("4881e339300000 REX.W andq rbx,0x3039",
+ andq(rbx, Immediate(12345)));
+
+ COMPARE("4881fb39300000 REX.W cmpq rbx,0x3039",
+ cmpq(rbx, Immediate(12345)));
+ COMPARE("4883fb0c REX.W cmpq rbx,0xc", cmpq(rbx, Immediate(12)));
+ COMPARE("4883bc8a102700000c REX.W cmpq [rdx+rcx*4+0x2710],0xc",
+ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12)));
+ COMPARE("80f864 cmpb al,0x64", cmpb(rax, Immediate(100)));
+
+ COMPARE("4881cb39300000 REX.W orq rbx,0x3039",
+ orq(rbx, Immediate(12345)));
+ COMPARE("4883eb0c REX.W subq rbx,0xc", subq(rbx, Immediate(12)));
+ COMPARE("4883ac8a102700000c REX.W subq [rdx+rcx*4+0x2710],0xc",
+ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12)));
+ COMPARE("4881f339300000 REX.W xorq rbx,0x3039",
+ xorq(rbx, Immediate(12345)));
+ COMPARE("486bd10c REX.W imulq rdx,rcx,0xc",
+ imulq(rdx, rcx, Immediate(12)));
+ COMPARE("4869d1e8030000 REX.W imulq rdx,rcx,0x3e8",
+ imulq(rdx, rcx, Immediate(1000)));
+
+ COMPARE("fc cldl", cld());
+
+ COMPARE("482b948b10270000 REX.W subq rdx,[rbx+rcx*4+0x2710]",
+ subq(rdx, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("482bd3 REX.W subq rdx,rbx", subq(rdx, rbx));
+
+ COMPARE("66f7c23930 testw rdx,0x3039",
+ testq(rdx, Immediate(12345)));
+ COMPARE("488594cb10270000 REX.W testq rdx,[rbx+rcx*8+0x2710]",
+ testq(Operand(rbx, rcx, times_8, 10000), rdx));
+ COMPARE("849459e8030000 testb dl,[rcx+rbx*2+0x3e8]",
+ testb(Operand(rcx, rbx, times_2, 1000), rdx));
+ COMPARE("f640ec9a testb [rax-0x14],0x9a",
+ testb(Operand(rax, -20), Immediate(0x9A)));
+
+ COMPARE("4881f239300000 REX.W xorq rdx,0x3039",
+ xorq(rdx, Immediate(12345)));
+ COMPARE("483394cb10270000 REX.W xorq rdx,[rbx+rcx*8+0x2710]",
+ xorq(rdx, Operand(rbx, rcx, times_8, 10000)));
+ COMPARE("f4 hltl", hlt());
+ COMPARE("cc int3l", int3());
+ COMPARE("c3 retl", ret(0));
+ COMPARE("c20800 ret 0x8", ret(8));
+
+ // 0xD9 instructions
+ COMPARE("d9c1 fld st1", fld(1));
+ COMPARE("d9e8 fld1", fld1());
+ COMPARE("d9ee fldz", fldz());
+ COMPARE("d9eb fldpi", fldpi());
+ COMPARE("d9e1 fabs", fabs());
+ COMPARE("d9e0 fchs", fchs());
+ COMPARE("d9f8 fprem", fprem());
+ COMPARE("d9f5 fprem1", fprem1());
+ COMPARE("d9f7 fincstp", fincstp());
+ COMPARE("d9e4 ftst", ftst());
+ COMPARE("d9cb fxch st3", fxch(3));
+ COMPARE("d9848b10270000 fld_s [rbx+rcx*4+0x2710]",
+ fld_s(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("d99c8b10270000 fstp_s [rbx+rcx*4+0x2710]",
+ fstp_s(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("ddc3 ffree st3", ffree(3));
+ COMPARE("dd848b10270000 fld_d [rbx+rcx*4+0x2710]",
+ fld_d(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("dd9c8b10270000 fstp_d [rbx+rcx*4+0x2710]",
+ fstp_d(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("db848b10270000 fild_s [rbx+rcx*4+0x2710]",
+ fild_s(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("db9c8b10270000 fistp_s [rbx+rcx*4+0x2710]",
+ fistp_s(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("dfac8b10270000 fild_d [rbx+rcx*4+0x2710]",
+ fild_d(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("dfbc8b10270000 fistp_d [rbx+rcx*4+0x2710]",
+ fistp_d(Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("dfe0 fnstsw_ax", fnstsw_ax());
+ COMPARE("dcc3 fadd st3", fadd(3));
+ COMPARE("dceb fsub st3", fsub(3));
+ COMPARE("dccb fmul st3", fmul(3));
+ COMPARE("dcfb fdiv st3", fdiv(3));
+ COMPARE("dec3 faddp st3", faddp(3));
+ COMPARE("deeb fsubp st3", fsubp(3));
+ COMPARE("decb fmulp st3", fmulp(3));
+ COMPARE("defb fdivp st3", fdivp(3));
+ COMPARE("ded9 fcompp", fcompp());
+ COMPARE("9b fwaitl", fwait());
+ COMPARE("d9fc frndint", frndint());
+ COMPARE("dbe3 fninit", fninit());
+
+ COMPARE("480f4000 REX.W cmovoq rax,[rax]",
+ cmovq(overflow, rax, Operand(rax, 0)));
+ COMPARE("480f414001 REX.W cmovnoq rax,[rax+0x1]",
+ cmovq(no_overflow, rax, Operand(rax, 1)));
+ COMPARE("480f424002 REX.W cmovcq rax,[rax+0x2]",
+ cmovq(below, rax, Operand(rax, 2)));
+ COMPARE("480f434003 REX.W cmovncq rax,[rax+0x3]",
+ cmovq(above_equal, rax, Operand(rax, 3)));
+ COMPARE("480f4403 REX.W cmovzq rax,[rbx]",
+ cmovq(equal, rax, Operand(rbx, 0)));
+ COMPARE("480f454301 REX.W cmovnzq rax,[rbx+0x1]",
+ cmovq(not_equal, rax, Operand(rbx, 1)));
+ COMPARE("480f464302 REX.W cmovnaq rax,[rbx+0x2]",
+ cmovq(below_equal, rax, Operand(rbx, 2)));
+ COMPARE("480f474303 REX.W cmovaq rax,[rbx+0x3]",
+ cmovq(above, rax, Operand(rbx, 3)));
+ COMPARE("480f4801 REX.W cmovsq rax,[rcx]",
+ cmovq(sign, rax, Operand(rcx, 0)));
+ COMPARE("480f494101 REX.W cmovnsq rax,[rcx+0x1]",
+ cmovq(not_sign, rax, Operand(rcx, 1)));
+ COMPARE("480f4a4102 REX.W cmovpeq rax,[rcx+0x2]",
+ cmovq(parity_even, rax, Operand(rcx, 2)));
+ COMPARE("480f4b4103 REX.W cmovpoq rax,[rcx+0x3]",
+ cmovq(parity_odd, rax, Operand(rcx, 3)));
+ COMPARE("480f4c02 REX.W cmovlq rax,[rdx]",
+ cmovq(less, rax, Operand(rdx, 0)));
+ COMPARE("480f4d4201 REX.W cmovgeq rax,[rdx+0x1]",
+ cmovq(greater_equal, rax, Operand(rdx, 1)));
+ COMPARE("480f4e4202 REX.W cmovleq rax,[rdx+0x2]",
+ cmovq(less_equal, rax, Operand(rdx, 2)));
+ COMPARE("480f4f4203 REX.W cmovgq rax,[rdx+0x3]",
+ cmovq(greater, rax, Operand(rdx, 3)));
+}
+
+// This compares just the disassembled instruction (without the hex).
+// Requires a |std::string actual| to be in scope.
+// Hard-coded offset of 21: the hex part is 20 bytes, plus a space. If and when
+// the padding changes, this should be adjusted.
+constexpr int kHexOffset = 21;
+#define COMPARE_INSTR(str, ASM) \
+ t.prev_offset = t.pc_offset(); \
+ t.assm_.ASM; \
+ actual = t.InstructionDecode(); \
+ actual = std::string(actual, kHexOffset, actual.size() - kHexOffset); \
+ CHECK_EQ(str, actual);
+
+UNINITIALIZED_TEST(DisasmX64CheckOutputSSE) {
+ DisassemblerTester t;
+ std::string actual;
+
+ COMPARE("f30f2c948b10270000 cvttss2sil rdx,[rbx+rcx*4+0x2710]",
+ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f30f2cd1 cvttss2sil rdx,xmm1", cvttss2si(rdx, xmm1));
+ COMPARE("f3480f2a8c8b10270000 REX.W cvtsi2ss xmm1,[rbx+rcx*4+0x2710]",
+ cvtqsi2ss(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f3480f2aca REX.W cvtsi2ss xmm1,rdx", cvtqsi2ss(xmm1, rdx));
+ COMPARE("f3480f5bc1 REX.W cvttps2dq xmm0,xmm1",
+ cvttps2dq(xmm0, xmm1));
+ COMPARE("f3480f5b848b10270000 REX.W cvttps2dq xmm0,[rbx+rcx*4+0x2710]",
+ cvttps2dq(xmm0, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("0f28c1 movaps xmm0,xmm1", movaps(xmm0, xmm1));
+ COMPARE("0f28848b10270000 movaps xmm0,[rbx+rcx*4+0x2710]",
+ movaps(xmm0, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("66480f6f44240c REX.W movdqa xmm0,[rsp+0xc]",
+ movdqa(xmm0, Operand(rsp, 12)));
+ COMPARE("66480f7f44240c REX.W movdqa [rsp+0xc],xmm0",
+ movdqa(Operand(rsp, 12), xmm0));
+ COMPARE("f3480f6f44240c REX.W movdqu xmm0,[rsp+0xc]",
+ movdqu(xmm0, Operand(rsp, 12)));
+ COMPARE("f3480f7f44240c REX.W movdqu [rsp+0xc],xmm0",
+ movdqu(Operand(rsp, 12), xmm0));
+ COMPARE("f3480f6fc8 REX.W movdqu xmm1,xmm0", movdqu(xmm1, xmm0));
+ COMPARE("0f12e9 movhlps xmm5,xmm1", movhlps(xmm5, xmm1));
+ COMPARE("440f12848b10270000 movlps xmm8,[rbx+rcx*4+0x2710]",
+ movlps(xmm8, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("440f138c8b10270000 movlps [rbx+rcx*4+0x2710],xmm9",
+ movlps(Operand(rbx, rcx, times_4, 10000), xmm9));
+ COMPARE("0f16e9 movlhps xmm5,xmm1", movlhps(xmm5, xmm1));
+ COMPARE("440f16848b10270000 movhps xmm8,[rbx+rcx*4+0x2710]",
+ movhps(xmm8, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("440f178c8b10270000 movhps [rbx+rcx*4+0x2710],xmm9",
+ movhps(Operand(rbx, rcx, times_4, 10000), xmm9));
+ COMPARE("410fc6c100 shufps xmm0, xmm9, 0", shufps(xmm0, xmm9, 0x0));
+ COMPARE("0f2ec1 ucomiss xmm0,xmm1", ucomiss(xmm0, xmm1));
+ COMPARE("0f2e848b10270000 ucomiss xmm0,[rbx+rcx*4+0x2710]",
+ ucomiss(xmm0, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("410f50d1 movmskps rdx,xmm9", movmskps(rdx, xmm9));
+
+ std::string exp;
+
+#define COMPARE_SSE_INSTR(instruction, _, __) \
+ exp = #instruction " xmm1,xmm0"; \
+ COMPARE_INSTR(exp, instruction(xmm1, xmm0)); \
+ exp = #instruction " xmm1,[rbx+rcx*4+0x2710]"; \
+ COMPARE_INSTR(exp, instruction(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ SSE_BINOP_INSTRUCTION_LIST(COMPARE_SSE_INSTR)
+ SSE_UNOP_INSTRUCTION_LIST(COMPARE_SSE_INSTR)
+#undef COMPARE_SSE_INSTR
+
+#define COMPARE_SSE_INSTR(instruction, _, __, ___) \
+ exp = #instruction " xmm1,xmm0"; \
+ COMPARE_INSTR(exp, instruction(xmm1, xmm0)); \
+ exp = #instruction " xmm1,[rbx+rcx*4+0x2710]"; \
+ COMPARE_INSTR(exp, instruction(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ SSE_INSTRUCTION_LIST_SS(COMPARE_SSE_INSTR)
+#undef COMPARE_SSE_INSTR
+}
+
+UNINITIALIZED_TEST(DisasmX64CheckOutputSSE2) {
+ DisassemblerTester t;
+ std::string actual, exp;
+
+ COMPARE("f30fe6dc cvtdq2pd xmm3,xmm4", cvtdq2pd(xmm3, xmm4));
+ COMPARE("f20f2c948b10270000 cvttsd2sil rdx,[rbx+rcx*4+0x2710]",
+ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f20f2cd1 cvttsd2sil rdx,xmm1", cvttsd2si(rdx, xmm1));
+ COMPARE("f2480f2cd1 REX.W cvttsd2siq rdx,xmm1",
+ cvttsd2siq(rdx, xmm1));
+ COMPARE("f2480f2c948b10270000 REX.W cvttsd2siq rdx,[rbx+rcx*4+0x2710]",
+ cvttsd2siq(rdx, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f20f2a8c8b10270000 cvtsi2sd xmm1,[rbx+rcx*4+0x2710]",
+ cvtlsi2sd(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f20f2aca cvtsi2sd xmm1,rdx", cvtlsi2sd(xmm1, rdx));
+ COMPARE("f2480f2a8c8b10270000 REX.W cvtsi2sd xmm1,[rbx+rcx*4+0x2710]",
+ cvtqsi2sd(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f2480f2aca REX.W cvtsi2sd xmm1,rdx", cvtqsi2sd(xmm1, rdx));
+ COMPARE("f3410f5ac9 cvtss2sd xmm1,xmm9", cvtss2sd(xmm1, xmm9));
+ COMPARE("f30f5a8c8b10270000 cvtss2sd xmm1,[rbx+rcx*4+0x2710]",
+ cvtss2sd(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f2410f2dd1 cvtsd2sil rdx,xmm9", cvtsd2si(rdx, xmm9));
+ COMPARE("f2490f2dd1 REX.W cvtsd2siq rdx,xmm9",
+          cvtsd2siq(rdx, xmm9));
+
+ COMPARE("f20f108c8b10270000 movsd xmm1,[rbx+rcx*4+0x2710]",
+ movsd(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f20f118c8b10270000 movsd [rbx+rcx*4+0x2710],xmm1",
+ movsd(Operand(rbx, rcx, times_4, 10000), xmm1));
+ COMPARE("660f10848b10270000 movupd xmm0,[rbx+rcx*4+0x2710]",
+ movupd(xmm0, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("660f11848b10270000 movupd [rbx+rcx*4+0x2710],xmm0",
+ movupd(Operand(rbx, rcx, times_4, 10000), xmm0));
+ COMPARE("66480f6f848b10270000 REX.W movdqa xmm0,[rbx+rcx*4+0x2710]",
+ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("66480f7f848b10270000 REX.W movdqa [rbx+rcx*4+0x2710],xmm0",
+ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0));
+ COMPARE("66480f7fc8 REX.W movdqa xmm0,xmm1", movdqa(xmm0, xmm1));
+ COMPARE("660f2ec1 ucomisd xmm0,xmm1", ucomisd(xmm0, xmm1));
+ COMPARE("66440f2e849310270000 ucomisd xmm8,[rbx+rdx*4+0x2710]",
+ ucomisd(xmm8, Operand(rbx, rdx, times_4, 10000)));
+ COMPARE("f2410fc2db01 cmpltsd xmm3,xmm11", cmpltsd(xmm3, xmm11));
+ COMPARE("66410f50d1 movmskpd rdx,xmm9", movmskpd(rdx, xmm9));
+ COMPARE("66410fd7d1 pmovmskb r9,xmm2", pmovmskb(rdx, xmm9));
+ COMPARE("660f76c8 pcmpeqd xmm1,xmm0", pcmpeqd(xmm1, xmm0));
+ COMPARE("66410f62cb punpckldq xmm1,xmm11", punpckldq(xmm1, xmm11));
+ COMPARE("660f626a04 punpckldq xmm5,[rdx+0x4]",
+ punpckldq(xmm5, Operand(rdx, 4)));
+ COMPARE("66450f6ac7 punpckhdq xmm8,xmm15", punpckhdq(xmm8, xmm15));
+ COMPARE("f20f70d403 pshuflw xmm2,xmm4,3", pshuflw(xmm2, xmm4, 3));
+ COMPARE("f3410f70c906 pshufhw xmm1,xmm9, 6", pshufhw(xmm1, xmm9, 6));
+
+#define COMPARE_SSE2_INSTR(instruction, _, __, ___) \
+ exp = #instruction " xmm1,xmm0"; \
+ COMPARE_INSTR(exp, instruction(xmm1, xmm0)); \
+ exp = #instruction " xmm1,[rbx+rcx*4+0x2710]"; \
+ COMPARE_INSTR(exp, instruction(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ SSE2_INSTRUCTION_LIST(COMPARE_SSE2_INSTR)
+ SSE2_UNOP_INSTRUCTION_LIST(COMPARE_SSE2_INSTR)
+ SSE2_INSTRUCTION_LIST_SD(COMPARE_SSE2_INSTR)
+#undef COMPARE_SSE2_INSTR
+
+#define COMPARE_SSE2_SHIFT_IMM(instruction, _, __, ___, ____) \
+ exp = #instruction " xmm3,35"; \
+ COMPARE_INSTR(exp, instruction(xmm3, 0xA3));
+ SSE2_INSTRUCTION_LIST_SHIFT_IMM(COMPARE_SSE2_SHIFT_IMM)
+#undef COMPARE_SSE2_SHIFT_IMM
+}
- base::Vector<const char> expected =
- base::StaticCharVector("c5fd6fc1 vmovdqa ymm0,ymm1\0");
- CHECK(expected == actual);
+UNINITIALIZED_TEST(DisasmX64CheckOutputSSE3) {
+ if (!CpuFeatures::IsSupported(SSE3)) {
+ return;
+ }
- actual.Dispose();
+ DisassemblerTester t;
+ CpuFeatureScope scope(&t.assm_, SSE3);
+
+ COMPARE("f20f7cc8 haddps xmm1,xmm0", haddps(xmm1, xmm0));
+ COMPARE("f20f7c8c8b10270000 haddps xmm1,[rbx+rcx*4+0x2710]",
+ haddps(xmm1, Operand(rbx, rcx, times_4, 10000)));
+ COMPARE("f20ff04a04 lddqu xmm1,[rdx+0x4]",
+ lddqu(xmm1, Operand(rdx, 4)));
+ COMPARE("f20f124805 movddup xmm1,[rax+0x5]",
+ movddup(xmm1, Operand(rax, 5)));
+ COMPARE("f20f12ca movddup xmm1,xmm2", movddup(xmm1, xmm2));
+ COMPARE("f30f16ca movshdup xmm1,xmm2", movshdup(xmm1, xmm2));
+}
+
+UNINITIALIZED_TEST(DisasmX64CheckOutputSSSE3) {
+ if (!CpuFeatures::IsSupported(SSSE3)) {
+ return;
+ }
+
+ DisassemblerTester t;
+ std::string actual, exp;
+ CpuFeatureScope scope(&t.assm_, SSSE3);
+
+ COMPARE("660f3a0fe905 palignr xmm5,xmm1,0x5", palignr(xmm5, xmm1, 5));
+ COMPARE("660f3a0f6a0405 palignr xmm5,[rdx+0x4],0x5",
+ palignr(xmm5, Operand(rdx, 4), 5));
+
+#define COMPARE_SSSE3_INSTR(instruction, _, __, ___, ____) \
+ exp = #instruction " xmm5,xmm1"; \
+ COMPARE_INSTR(exp, instruction(xmm5, xmm1)); \
+ exp = #instruction " xmm5,[rbx+rcx*4+0x2710]"; \
+ COMPARE_INSTR(exp, instruction(xmm5, Operand(rbx, rcx, times_4, 10000)));
+ SSSE3_INSTRUCTION_LIST(COMPARE_SSSE3_INSTR)
+ SSSE3_UNOP_INSTRUCTION_LIST(COMPARE_SSSE3_INSTR)
+#undef COMPARE_SSSE3_INSTR
+}
+
+UNINITIALIZED_TEST(DisasmX64YMMRegister) {
+ if (!CpuFeatures::IsSupported(AVX)) return;
+ DisassemblerTester t;
+ CpuFeatureScope fscope(t.assm(), AVX);
+
+  // AVX instructions on 256-bit (ymm) registers
+ COMPARE("c5fd6fc1 vmovdqa ymm0,ymm1", vmovdqa(ymm0, ymm1));
+ COMPARE("c5f77cc2 vhaddps ymm0,ymm1,ymm2",
+ vhaddps(ymm0, ymm1, ymm2));
+ COMPARE("c5f77c848b10270000 vhaddps ymm0,ymm1,[rbx+rcx*4+0x2710]",
+ vhaddps(ymm0, ymm1, Operand(rbx, rcx, times_4, 10000)));
}
#undef __
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 342dd46d53..bd7d32a4ff 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -108,7 +108,7 @@ class Expectations {
CHECK(index < MAX_PROPERTIES);
kinds_[index] = kind;
locations_[index] = location;
- if (kind == kData && location == kField &&
+ if (kind == kData && location == PropertyLocation::kField &&
IsTransitionableFastElementsKind(elements_kind_)) {
// Maps with transitionable elements kinds must have the most general
// field type.
@@ -139,7 +139,7 @@ class Expectations {
os << " (";
if (constnesses_[i] == PropertyConstness::kConst) os << "const ";
os << (kinds_[i] == kData ? "data " : "accessor ");
- if (locations_[i] == kField) {
+ if (locations_[i] == PropertyLocation::kField) {
os << "field"
<< ": " << representations_[i].Mnemonic();
} else {
@@ -156,14 +156,15 @@ class Expectations {
Handle<FieldType> GetFieldType(int index) {
CHECK(index < MAX_PROPERTIES);
- CHECK_EQ(kField, locations_[index]);
+ CHECK_EQ(PropertyLocation::kField, locations_[index]);
return Handle<FieldType>::cast(values_[index]);
}
void SetDataField(int index, PropertyAttributes attrs,
PropertyConstness constness, Representation representation,
Handle<FieldType> field_type) {
- Init(index, kData, attrs, constness, kField, representation, field_type);
+ Init(index, kData, attrs, constness, PropertyLocation::kField,
+ representation, field_type);
}
void SetDataField(int index, PropertyConstness constness,
@@ -174,8 +175,9 @@ class Expectations {
}
void SetAccessorField(int index, PropertyAttributes attrs) {
- Init(index, kAccessor, attrs, PropertyConstness::kConst, kDescriptor,
- Representation::Tagged(), FieldType::Any(isolate_));
+ Init(index, kAccessor, attrs, PropertyConstness::kConst,
+ PropertyLocation::kDescriptor, Representation::Tagged(),
+ FieldType::Any(isolate_));
}
void SetAccessorField(int index) {
@@ -185,8 +187,8 @@ class Expectations {
void SetDataConstant(int index, PropertyAttributes attrs,
Handle<JSFunction> value) {
Handle<FieldType> field_type(FieldType::Class(value->map()), isolate_);
- Init(index, kData, attrs, PropertyConstness::kConst, kField,
- Representation::HeapObject(), field_type);
+ Init(index, kData, attrs, PropertyConstness::kConst,
+ PropertyLocation::kField, Representation::HeapObject(), field_type);
}
void SetDataConstant(int index, Handle<JSFunction> value) {
@@ -195,8 +197,8 @@ class Expectations {
void SetAccessorConstant(int index, PropertyAttributes attrs,
Handle<Object> getter, Handle<Object> setter) {
- Init(index, kAccessor, attrs, PropertyConstness::kConst, kDescriptor,
- Representation::Tagged(), getter);
+ Init(index, kAccessor, attrs, PropertyConstness::kConst,
+ PropertyLocation::kDescriptor, Representation::Tagged(), getter);
setter_values_[index] = setter;
}
@@ -204,7 +206,7 @@ class Expectations {
AccessorComponent component,
Handle<Object> accessor) {
CHECK_EQ(kAccessor, kinds_[index]);
- CHECK_EQ(kDescriptor, locations_[index]);
+ CHECK_EQ(PropertyLocation::kDescriptor, locations_[index]);
CHECK(index < number_of_properties_);
if (component == ACCESSOR_GETTER) {
values_[index] = accessor;
@@ -234,7 +236,7 @@ class Expectations {
void GeneralizeField(int index) {
CHECK(index < number_of_properties_);
representations_[index] = Representation::Tagged();
- if (locations_[index] == kField) {
+ if (locations_[index] == PropertyLocation::kField) {
values_[index] = FieldType::Any(isolate_);
}
}
@@ -255,7 +257,7 @@ class Expectations {
if (!details.representation().Equals(expected_representation)) return false;
Object expected_value = *values_[descriptor.as_int()];
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
FieldType type = descriptors.GetFieldType(descriptor);
return FieldType::cast(expected_value) == type;
@@ -785,9 +787,9 @@ void TestGeneralizeField(const CRFTData& from, const CRFTData& to,
// Check the cases when the map being reconfigured is NOT a part of the
// transition tree. "None -> anything" representation changes make sense
// only for "attached" maps.
- int indices[] = {0, kPropCount - 1};
- for (int i = 0; i < static_cast<int>(arraysize(indices)); i++) {
- TestGeneralizeField(indices[i], 2, from, to, expected, expected_alert);
+ int indices2[] = {0, kPropCount - 1};
+ for (int i = 0; i < static_cast<int>(arraysize(indices2)); i++) {
+ TestGeneralizeField(indices2[i], 2, from, to, expected, expected_alert);
}
// Check that reconfiguration to the very same field works correctly.
@@ -2196,8 +2198,8 @@ static void TestGeneralizeFieldWithSpecialTransition(
if (config->is_non_equivalent_transition()) {
// In case of non-equivalent transition currently we generalize all
// representations.
- for (int i = 0; i < kPropCount; i++) {
- expectations2.GeneralizeField(i);
+ for (int j = 0; j < kPropCount; j++) {
+ expectations2.GeneralizeField(j);
}
CHECK(new_map2->GetBackPointer().IsUndefined(isolate));
CHECK(expectations2.Check(*new_map2));
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 55b34e3838..d9efaba7b1 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -504,7 +504,7 @@ TEST(FinalizerOnUnmodifiedJSApiObjectDoesNotCrash) {
v8::WeakCallbackType::kFinalizer);
fp.flag = false;
{
- v8::HandleScope scope(isolate);
+ v8::HandleScope inner_scope(isolate);
v8::Local<v8::Object> tmp = v8::Local<v8::Object>::New(isolate, fp.handle);
USE(tmp);
InvokeScavenge();
diff --git a/deps/v8/test/cctest/test-intl.cc b/deps/v8/test/cctest/test-intl.cc
index 7824fd657c..c8fe1fd9f9 100644
--- a/deps/v8/test/cctest/test-intl.cc
+++ b/deps/v8/test/cctest/test-intl.cc
@@ -4,7 +4,6 @@
#ifdef V8_INTL_SUPPORT
-#include "src/objects/intl-objects.h"
#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
#include "src/objects/js-date-time-format.h"
@@ -15,6 +14,7 @@
#include "src/objects/js-segmenter.h"
#include "src/objects/lookup.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/option-utils.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -123,8 +123,8 @@ TEST(GetStringOption) {
// No value found
std::unique_ptr<char[]> result = nullptr;
Maybe<bool> found =
- Intl::GetStringOption(isolate, options, "foo",
- std::vector<const char*>{}, "service", &result);
+ GetStringOption(isolate, options, "foo", std::vector<const char*>{},
+ "service", &result);
CHECK(!found.FromJust());
CHECK_NULL(result);
}
@@ -140,8 +140,8 @@ TEST(GetStringOption) {
// Value found
std::unique_ptr<char[]> result = nullptr;
Maybe<bool> found =
- Intl::GetStringOption(isolate, options, "foo",
- std::vector<const char*>{}, "service", &result);
+ GetStringOption(isolate, options, "foo", std::vector<const char*>{},
+ "service", &result);
CHECK(found.FromJust());
CHECK_NOT_NULL(result);
CHECK_EQ(0, strcmp("42", result.get()));
@@ -150,9 +150,9 @@ TEST(GetStringOption) {
{
// No expected value in values array
std::unique_ptr<char[]> result = nullptr;
- Maybe<bool> found = Intl::GetStringOption(isolate, options, "foo",
- std::vector<const char*>{"bar"},
- "service", &result);
+ Maybe<bool> found =
+ GetStringOption(isolate, options, "foo",
+ std::vector<const char*>{"bar"}, "service", &result);
CHECK(isolate->has_pending_exception());
CHECK(found.IsNothing());
CHECK_NULL(result);
@@ -162,9 +162,9 @@ TEST(GetStringOption) {
{
// Expected value in values array
std::unique_ptr<char[]> result = nullptr;
- Maybe<bool> found = Intl::GetStringOption(isolate, options, "foo",
- std::vector<const char*>{"42"},
- "service", &result);
+ Maybe<bool> found =
+ GetStringOption(isolate, options, "foo", std::vector<const char*>{"42"},
+ "service", &result);
CHECK(found.FromJust());
CHECK_NOT_NULL(result);
CHECK_EQ(0, strcmp("42", result.get()));
@@ -181,7 +181,7 @@ TEST(GetBoolOption) {
{
bool result = false;
Maybe<bool> found =
- Intl::GetBoolOption(isolate, options, "foo", "service", &result);
+ GetBoolOption(isolate, options, "foo", "service", &result);
CHECK(!found.FromJust());
CHECK(!result);
}
@@ -197,7 +197,7 @@ TEST(GetBoolOption) {
.Assert();
bool result = false;
Maybe<bool> found =
- Intl::GetBoolOption(isolate, options, "foo", "service", &result);
+ GetBoolOption(isolate, options, "foo", "service", &result);
CHECK(found.FromJust());
CHECK(!result);
}
@@ -212,7 +212,7 @@ TEST(GetBoolOption) {
.Assert();
bool result = false;
Maybe<bool> found =
- Intl::GetBoolOption(isolate, options, "foo", "service", &result);
+ GetBoolOption(isolate, options, "foo", "service", &result);
CHECK(found.FromJust());
CHECK(result);
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index d9eb8851b0..406980d1a0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -893,7 +893,7 @@ void TestFloat32x4Abs(MacroAssembler* masm, Label* exit, float x, float y,
__ Movss(Operand(rsp, 3 * kFloatSize), xmm4);
__ Movups(xmm0, Operand(rsp, 0));
- __ Absps(xmm0, xmm0);
+ __ Absps(xmm0, xmm0, kScratchRegister);
__ Movups(Operand(rsp, 0), xmm0);
__ incq(rax);
@@ -930,7 +930,7 @@ void TestFloat32x4Neg(MacroAssembler* masm, Label* exit, float x, float y,
__ Movss(Operand(rsp, 3 * kFloatSize), xmm4);
__ Movups(xmm0, Operand(rsp, 0));
- __ Negps(xmm0, xmm0);
+ __ Negps(xmm0, xmm0, kScratchRegister);
__ Movups(Operand(rsp, 0), xmm0);
__ incq(rax);
@@ -962,7 +962,7 @@ void TestFloat64x2Abs(MacroAssembler* masm, Label* exit, double x, double y) {
__ Movsd(Operand(rsp, 1 * kDoubleSize), xmm2);
__ movupd(xmm0, Operand(rsp, 0));
- __ Abspd(xmm0, xmm0);
+ __ Abspd(xmm0, xmm0, kScratchRegister);
__ movupd(Operand(rsp, 0), xmm0);
__ incq(rax);
@@ -986,7 +986,7 @@ void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
__ Movsd(Operand(rsp, 1 * kDoubleSize), xmm2);
__ movupd(xmm0, Operand(rsp, 0));
- __ Negpd(xmm0, xmm0);
+ __ Negpd(xmm0, xmm0, kScratchRegister);
__ movupd(Operand(rsp, 0), xmm0);
__ incq(rax);
diff --git a/deps/v8/test/cctest/test-managed.cc b/deps/v8/test/cctest/test-managed.cc
index 8d9185faa7..da863f11c0 100644
--- a/deps/v8/test/cctest/test-managed.cc
+++ b/deps/v8/test/cctest/test-managed.cc
@@ -6,8 +6,7 @@
#include <stdlib.h>
#include <string.h>
-#include "src/objects/managed.h"
-
+#include "src/objects/managed-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 2692748e62..ff80147f97 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -643,9 +643,8 @@ static Handle<JSRegExp> CreateJSRegExp(Handle<String> source, Handle<Code> code,
factory->SetRegExpIrregexpData(regexp, source, {}, 0,
JSRegExp::kNoBacktrackLimit);
- regexp->SetDataAt(is_unicode ? JSRegExp::kIrregexpUC16CodeIndex
- : JSRegExp::kIrregexpLatin1CodeIndex,
- ToCodeT(*code));
+ const bool is_latin1 = !is_unicode;
+ regexp->set_code(is_latin1, code);
return regexp;
}
@@ -2332,8 +2331,8 @@ TEST(UnicodePropertyEscapeCodeSize) {
static constexpr int kMaxSize = 200 * KB;
static constexpr bool kIsNotLatin1 = false;
- Object maybe_code = re->Code(kIsNotLatin1);
- Object maybe_bytecode = re->Bytecode(kIsNotLatin1);
+ Object maybe_code = re->code(kIsNotLatin1);
+ Object maybe_bytecode = re->bytecode(kIsNotLatin1);
if (maybe_bytecode.IsByteArray()) {
// On x64, excessive inlining produced >250KB.
CHECK_LT(ByteArray::cast(maybe_bytecode).Size(), kMaxSize);
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 08f9447b4e..8172d78c66 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -806,7 +806,7 @@ void TestCustomSnapshotDataBlobWithIrregexpCode(
// Check that ATOM regexp remains valid.
i::Handle<i::JSRegExp> re =
Utils::OpenHandle(*CompileRun("re2").As<v8::RegExp>());
- CHECK_EQ(re->TypeTag(), JSRegExp::ATOM);
+ CHECK_EQ(re->type_tag(), JSRegExp::ATOM);
CHECK(!re->HasCompiledCode());
}
}
@@ -3724,6 +3724,31 @@ TEST(SnapshotCreatorUnknownHandles) {
delete[] blob.data;
}
+UNINITIALIZED_TEST(SnapshotAccessorDescriptors) {
+ const char* source1 =
+ "var bValue = 38;\n"
+ "Object.defineProperty(this, 'property1', {\n"
+ " get() { return bValue; },\n"
+ " set(newValue) { bValue = newValue; },\n"
+ "});";
+ v8::StartupData data1 = CreateSnapshotDataBlob(source1);
+
+ v8::Isolate::CreateParams params1;
+ params1.snapshot_blob = &data1;
+ params1.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+ v8::Isolate* isolate1 = v8::Isolate::New(params1);
+ {
+ v8::Isolate::Scope i_scope(isolate1);
+ v8::HandleScope h_scope(isolate1);
+ v8::Local<v8::Context> context = v8::Context::New(isolate1);
+ v8::Context::Scope c_scope(context);
+ ExpectInt32("this.property1", 38);
+ }
+ isolate1->Dispose();
+ delete[] data1.data;
+}
+
UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
@@ -3832,9 +3857,8 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
extension->set_auto_enable(true);
v8::RegisterExtension(std::move(extension));
{
- // Create a new context from default context snapshot. This will
- // create a new global object from a new global object template
- // without the interceptor.
+      // Create a new context from the default context snapshot. This will
+      // also deserialize its global object with the interceptor.
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -3844,9 +3868,7 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
ExpectInt32("j()", 25);
ExpectInt32("o.p", 8);
ExpectInt32("a", 26);
- v8::TryCatch try_catch(isolate);
- CHECK(CompileRun("x").IsEmpty());
- CHECK(try_catch.HasCaught());
+ ExpectInt32("x", 2016);
}
{
// Create a new context from first additional context snapshot. This
diff --git a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
index 7d59331e31..863177bb22 100644
--- a/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
+++ b/deps/v8/test/cctest/test-swiss-name-dictionary-csa.cc
@@ -420,8 +420,8 @@ Handle<Code> CSATestRunner::create_get_counts(Isolate* isolate) {
TNode<FixedArray> results = m.AllocateZeroedFixedArray(m.IntPtrConstant(3));
auto check_and_add = [&](TNode<IntPtrT> value, int array_index) {
- CSA_ASSERT(&m, m.UintPtrGreaterThanOrEqual(value, m.IntPtrConstant(0)));
- CSA_ASSERT(&m, m.UintPtrLessThanOrEqual(
+ CSA_DCHECK(&m, m.UintPtrGreaterThanOrEqual(value, m.IntPtrConstant(0)));
+ CSA_DCHECK(&m, m.UintPtrLessThanOrEqual(
value, m.IntPtrConstant(Smi::kMaxValue)));
TNode<Smi> smi = m.SmiFromIntPtr(value);
m.StoreFixedArrayElement(results, array_index, smi);
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index cba563242f..3a82cf67f1 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -28,7 +28,7 @@
#include <utility>
#include "src/execution/isolate.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/objects/hash-table-inl.h"
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index fedcd65ad7..0a890c27a8 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -28,7 +28,7 @@
#include <utility>
#include "src/execution/isolate.h"
-#include "src/handles/global-handles.h"
+#include "src/handles/global-handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/objects/hash-table-inl.h"
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 653eebe66f..f7eba9ebe7 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -323,7 +323,7 @@ TEST(TestCatch1) {
TNode<Smi> result =
m.TestCatch1(m.UncheckedCast<Context>(m.HeapConstant(context)));
USE(result);
- CSA_ASSERT(&m, m.TaggedEqual(result, m.SmiConstant(1)));
+ CSA_DCHECK(&m, m.TaggedEqual(result, m.SmiConstant(1)));
m.Return(m.UndefinedConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -342,7 +342,7 @@ TEST(TestCatch2) {
TNode<Smi> result =
m.TestCatch2(m.UncheckedCast<Context>(m.HeapConstant(context)));
USE(result);
- CSA_ASSERT(&m, m.TaggedEqual(result, m.SmiConstant(2)));
+ CSA_DCHECK(&m, m.TaggedEqual(result, m.SmiConstant(2)));
m.Return(m.UndefinedConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -361,7 +361,7 @@ TEST(TestCatch3) {
TNode<Smi> result =
m.TestCatch3(m.UncheckedCast<Context>(m.HeapConstant(context)));
USE(result);
- CSA_ASSERT(&m, m.TaggedEqual(result, m.SmiConstant(2)));
+ CSA_DCHECK(&m, m.TaggedEqual(result, m.SmiConstant(2)));
m.Return(m.UndefinedConstant());
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index d24a926401..e6f98e55fb 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -81,18 +81,20 @@ class WasmGCTester {
isolate_->factory()->undefined_value(), argc, args);
}
- byte DefineStruct(std::initializer_list<F> fields) {
+ byte DefineStruct(std::initializer_list<F> fields,
+ uint32_t supertype = kNoSuperType) {
StructType::Builder type_builder(&zone_,
static_cast<uint32_t>(fields.size()));
for (F field : fields) {
type_builder.AddField(field.first, field.second);
}
- return builder_.AddStructType(type_builder.Build());
+ return builder_.AddStructType(type_builder.Build(), supertype);
}
- byte DefineArray(ValueType element_type, bool mutability) {
- return builder_.AddArrayType(
- zone_.New<ArrayType>(element_type, mutability));
+ byte DefineArray(ValueType element_type, bool mutability,
+ uint32_t supertype = kNoSuperType) {
+ return builder_.AddArrayType(zone_.New<ArrayType>(element_type, mutability),
+ supertype);
}
byte DefineSignature(FunctionSig* sig) { return builder_.AddSignature(sig); }
@@ -211,7 +213,7 @@ class WasmGCTester {
void CallFunctionImpl(uint32_t function_index, const FunctionSig* sig,
CWasmArgumentsPacker* packer) {
- WasmCodeRefScope scope;
+ WasmCodeRefScope code_ref_scope;
NativeModule* native_module = instance_->module_object().native_module();
WasmCode* code = native_module->GetCode(function_index);
Address wasm_call_target = code->instruction_start();
@@ -276,6 +278,11 @@ WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
WASM_RTT_CANON(type_index)),
kExprEnd});
+ const byte kGetStructNominal = tester.DefineFunction(
+ &sig_q_v, {},
+ {WASM_STRUCT_NEW_DEFAULT(type_index), WASM_DROP,
+ WASM_STRUCT_NEW(type_index, WASM_I32V(42), WASM_I32V(64)), kExprEnd});
+
// Test struct.new, returning reference to an empty struct.
const byte kGetEmptyStruct = tester.DefineFunction(
&sig_qe_v, {},
@@ -303,6 +310,9 @@ WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
tester.CheckResult(kGet1, 42);
tester.CheckResult(kGet2, 64);
CHECK(tester.GetResultObject(kGetStruct).ToHandleChecked()->IsWasmStruct());
+ CHECK(tester.GetResultObject(kGetStructNominal)
+ .ToHandleChecked()
+ ->IsWasmStruct());
CHECK(tester.GetResultObject(kGetEmptyStruct)
.ToHandleChecked()
->IsWasmStruct());
@@ -457,6 +467,30 @@ WASM_COMPILED_EXEC_TEST(BrOnCast) {
WASM_GC_OP(kExprStructGet), type_index, 0, WASM_LOCAL_GET(0),
kExprI32Add, kExprEnd});
+ const byte kTestStructStatic = tester.DefineFunction(
+ tester.sigs.i_v(), {kWasmI32, kDataRefNull},
+ {WASM_BLOCK_R(
+ ValueType::Ref(type_index, kNullable),
+ WASM_LOCAL_SET(0, WASM_I32V(111)),
+ // Pipe a struct through a local so it's statically typed
+ // as dataref.
+ WASM_LOCAL_SET(1, WASM_STRUCT_NEW(other_type_index, WASM_F32(1.0))),
+ WASM_LOCAL_GET(1),
+ // The type check fails, so this branch isn't taken.
+ WASM_BR_ON_CAST_STATIC(0, type_index), WASM_DROP,
+
+ WASM_LOCAL_SET(0, WASM_I32V(221)), // (Final result) - 1
+ WASM_LOCAL_SET(1, WASM_STRUCT_NEW(type_index, WASM_I32V(1))),
+ WASM_LOCAL_GET(1),
+ // This branch is taken.
+ WASM_BR_ON_CAST_STATIC(0, type_index),
+ WASM_GC_OP(kExprRefCastStatic), type_index,
+
+ // Not executed due to the branch.
+ WASM_LOCAL_SET(0, WASM_I32V(333))),
+ WASM_GC_OP(kExprStructGet), type_index, 0, WASM_LOCAL_GET(0),
+ kExprI32Add, kExprEnd});
+
const byte kTestNull = tester.DefineFunction(
tester.sigs.i_v(), {kWasmI32, kDataRefNull},
{WASM_BLOCK_R(ValueType::Ref(type_index, kNullable),
@@ -488,6 +522,7 @@ WASM_COMPILED_EXEC_TEST(BrOnCast) {
tester.CompileModule();
tester.CheckResult(kTestStruct, 222);
+ tester.CheckResult(kTestStructStatic, 222);
tester.CheckResult(kTestNull, 222);
tester.CheckResult(kTypedAfterBranch, 42);
}
@@ -542,8 +577,21 @@ WASM_COMPILED_EXEC_TEST(BrOnCastFail) {
WASM_RTT_CANON(type1)))});
#undef FUNCTION_BODY
+ const byte kBranchTakenStatic = tester.DefineFunction(
+ tester.sigs.i_v(), {kDataRefNull},
+ {WASM_LOCAL_SET(
+ 0, WASM_STRUCT_NEW(type1, WASM_I64V(10), WASM_I32V(field1_value))),
+ WASM_BLOCK(
+ WASM_BLOCK_R(kDataRefNull, WASM_LOCAL_GET(0),
+ WASM_BR_ON_CAST_STATIC_FAIL(0, type0),
+ WASM_GC_OP(kExprStructGet), type0, 0, kExprReturn),
+ kExprBrOnNull, 0, WASM_GC_OP(kExprRefCastStatic), type1,
+ WASM_GC_OP(kExprStructGet), type1, 1, kExprReturn),
+ WASM_I32V(null_value), kExprEnd});
+
tester.CompileModule();
tester.CheckResult(kBranchTaken, field1_value);
+ tester.CheckResult(kBranchTakenStatic, field1_value);
tester.CheckResult(kBranchNotTaken, field0_value);
tester.CheckResult(kNull, null_value);
tester.CheckResult(kUnrelatedTypes, field1_value);
@@ -779,19 +827,30 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WASM_RTT_CANON(type_index)),
kExprEnd});
+ const byte kAllocateStatic = tester.DefineFunction(
+ &sig_q_v, {},
+ {WASM_ARRAY_NEW_DEFAULT(type_index, WASM_I32V(2)), WASM_DROP,
+ WASM_ARRAY_NEW(type_index, WASM_I32V(42), WASM_I32V(2)), kExprEnd});
+
+ const byte kInit = tester.DefineFunction(
+ &sig_q_v, {},
+ {WASM_ARRAY_INIT(type_index, 3, WASM_I32V(10), WASM_I32V(20),
+ WASM_I32V(30), WASM_RTT_CANON(type_index)),
+ kExprEnd});
+
const uint32_t kLongLength = 1u << 16;
const byte kAllocateLarge = tester.DefineFunction(
&sig_q_v, {},
- {WASM_ARRAY_NEW_DEFAULT(type_index, WASM_I32V(kLongLength),
- WASM_RTT_CANON(type_index)),
+ {WASM_ARRAY_NEW_DEFAULT_WITH_RTT(type_index, WASM_I32V(kLongLength),
+ WASM_RTT_CANON(type_index)),
kExprEnd});
ArrayType array_type(kWasmI32, true);
const uint32_t kTooLong = WasmArray::MaxLength(&array_type) + 1;
const byte kAllocateTooLarge = tester.DefineFunction(
&sig_q_v, {},
- {WASM_ARRAY_NEW_DEFAULT(type_index, WASM_I32V(kTooLong),
- WASM_RTT_CANON(type_index)),
+ {WASM_ARRAY_NEW_DEFAULT_WITH_RTT(type_index, WASM_I32V(kTooLong),
+ WASM_RTT_CANON(type_index)),
kExprEnd});
// Tests that fp arrays work properly.
@@ -818,11 +877,20 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
tester.CheckResult(kGetLength, 42);
tester.CheckResult(kTestFpArray, static_cast<int32_t>(result_value));
- MaybeHandle<Object> h_result = tester.GetResultObject(kAllocate);
- CHECK(h_result.ToHandleChecked()->IsWasmArray());
-#if OBJECT_PRINT
- h_result.ToHandleChecked()->Print();
-#endif
+ Handle<Object> h_result = tester.GetResultObject(kAllocate).ToHandleChecked();
+ CHECK(h_result->IsWasmArray());
+ CHECK_EQ(2, Handle<WasmArray>::cast(h_result)->length());
+
+ h_result = tester.GetResultObject(kAllocateStatic).ToHandleChecked();
+ CHECK(h_result->IsWasmArray());
+ CHECK_EQ(2, Handle<WasmArray>::cast(h_result)->length());
+
+ Handle<Object> init_result = tester.GetResultObject(kInit).ToHandleChecked();
+ CHECK(init_result->IsWasmArray());
+ CHECK_EQ(3, Handle<WasmArray>::cast(init_result)->length());
+ CHECK_EQ(10, Handle<WasmArray>::cast(init_result)->GetElement(0).to_i32());
+ CHECK_EQ(20, Handle<WasmArray>::cast(init_result)->GetElement(1).to_i32());
+ CHECK_EQ(30, Handle<WasmArray>::cast(init_result)->GetElement(2).to_i32());
MaybeHandle<Object> maybe_large_result =
tester.GetResultObject(kAllocateLarge);
@@ -913,8 +981,9 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
// Copies i32 ranges: local1[0..3] to local2[6..9].
const byte kCopyI32 = tester.DefineFunction(
tester.sigs.i_i(), {optref(array32_index), optref(array32_index)},
- {WASM_LOCAL_SET(1, WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(10),
- WASM_RTT_CANON(array32_index))),
+ {WASM_LOCAL_SET(
+ 1, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(10),
+ WASM_RTT_CANON(array32_index))),
WASM_ARRAY_SET(array32_index, WASM_LOCAL_GET(1), WASM_I32V(0),
WASM_I32V(0)),
WASM_ARRAY_SET(array32_index, WASM_LOCAL_GET(1), WASM_I32V(1),
@@ -923,8 +992,9 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
WASM_I32V(2)),
WASM_ARRAY_SET(array32_index, WASM_LOCAL_GET(1), WASM_I32V(3),
WASM_I32V(3)),
- WASM_LOCAL_SET(2, WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(10),
- WASM_RTT_CANON(array32_index))),
+ WASM_LOCAL_SET(
+ 2, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(10),
+ WASM_RTT_CANON(array32_index))),
WASM_ARRAY_COPY(array32_index, array32_index, WASM_LOCAL_GET(2),
WASM_I32V(6), WASM_LOCAL_GET(1), WASM_I32V(0),
WASM_I32V(4)),
@@ -934,8 +1004,9 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
// Copies i16 ranges: local1[0..3] to local2[6..9].
const byte kCopyI16 = tester.DefineFunction(
tester.sigs.i_i(), {optref(array16_index), optref(array16_index)},
- {WASM_LOCAL_SET(1, WASM_ARRAY_NEW_DEFAULT(array16_index, WASM_I32V(10),
- WASM_RTT_CANON(array16_index))),
+ {WASM_LOCAL_SET(
+ 1, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array16_index, WASM_I32V(10),
+ WASM_RTT_CANON(array16_index))),
WASM_ARRAY_SET(array16_index, WASM_LOCAL_GET(1), WASM_I32V(0),
WASM_I32V(0)),
WASM_ARRAY_SET(array16_index, WASM_LOCAL_GET(1), WASM_I32V(1),
@@ -944,8 +1015,9 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
WASM_I32V(2)),
WASM_ARRAY_SET(array16_index, WASM_LOCAL_GET(1), WASM_I32V(3),
WASM_I32V(3)),
- WASM_LOCAL_SET(2, WASM_ARRAY_NEW_DEFAULT(array16_index, WASM_I32V(10),
- WASM_RTT_CANON(array16_index))),
+ WASM_LOCAL_SET(
+ 2, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array16_index, WASM_I32V(10),
+ WASM_RTT_CANON(array16_index))),
WASM_ARRAY_COPY(array16_index, array16_index, WASM_LOCAL_GET(2),
WASM_I32V(6), WASM_LOCAL_GET(1), WASM_I32V(0),
WASM_I32V(4)),
@@ -956,24 +1028,28 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
const byte kCopyRef = tester.DefineFunction(
FunctionSig::Build(tester.zone(), {optref(array32_index)}, {kWasmI32}),
{optref(arrayref_index), optref(arrayref_index)},
- {WASM_LOCAL_SET(1,
- WASM_ARRAY_NEW_DEFAULT(arrayref_index, WASM_I32V(10),
- WASM_RTT_CANON(arrayref_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(0),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(6),
- WASM_RTT_CANON(array32_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(1),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(7),
- WASM_RTT_CANON(array32_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(2),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(8),
- WASM_RTT_CANON(array32_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(3),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(9),
- WASM_RTT_CANON(array32_index))),
- WASM_LOCAL_SET(2,
- WASM_ARRAY_NEW_DEFAULT(arrayref_index, WASM_I32V(10),
- WASM_RTT_CANON(arrayref_index))),
+ {WASM_LOCAL_SET(
+ 1, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(arrayref_index, WASM_I32V(10),
+ WASM_RTT_CANON(arrayref_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(0),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(6),
+ WASM_RTT_CANON(array32_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(1),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(7),
+ WASM_RTT_CANON(array32_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(2),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(8),
+ WASM_RTT_CANON(array32_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(3),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(9),
+ WASM_RTT_CANON(array32_index))),
+ WASM_LOCAL_SET(
+ 2, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(arrayref_index, WASM_I32V(10),
+ WASM_RTT_CANON(arrayref_index))),
WASM_ARRAY_COPY(arrayref_index, arrayref_index, WASM_LOCAL_GET(2),
WASM_I32V(6), WASM_LOCAL_GET(1), WASM_I32V(0),
WASM_I32V(4)),
@@ -984,21 +1060,25 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
const byte kCopyRefOverlapping = tester.DefineFunction(
FunctionSig::Build(tester.zone(), {optref(array32_index)}, {kWasmI32}),
{optref(arrayref_index)},
- {WASM_LOCAL_SET(1,
- WASM_ARRAY_NEW_DEFAULT(arrayref_index, WASM_I32V(10),
- WASM_RTT_CANON(arrayref_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(0),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(2),
- WASM_RTT_CANON(array32_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(1),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(3),
- WASM_RTT_CANON(array32_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(2),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(4),
- WASM_RTT_CANON(array32_index))),
- WASM_ARRAY_SET(arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(3),
- WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(5),
- WASM_RTT_CANON(array32_index))),
+ {WASM_LOCAL_SET(
+ 1, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(arrayref_index, WASM_I32V(10),
+ WASM_RTT_CANON(arrayref_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(0),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(2),
+ WASM_RTT_CANON(array32_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(1),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(3),
+ WASM_RTT_CANON(array32_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(2),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(4),
+ WASM_RTT_CANON(array32_index))),
+ WASM_ARRAY_SET(
+ arrayref_index, WASM_LOCAL_GET(1), WASM_I32V(3),
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(5),
+ WASM_RTT_CANON(array32_index))),
WASM_ARRAY_COPY(arrayref_index, arrayref_index, WASM_LOCAL_GET(1),
WASM_I32V(2), WASM_LOCAL_GET(1), WASM_I32V(0),
WASM_I32V(4)),
@@ -1007,10 +1087,12 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
const byte kOobSource = tester.DefineFunction(
tester.sigs.v_v(), {optref(array32_index), optref(array32_index)},
- {WASM_LOCAL_SET(0, WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(10),
- WASM_RTT_CANON(array32_index))),
- WASM_LOCAL_SET(1, WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(10),
- WASM_RTT_CANON(array32_index))),
+ {WASM_LOCAL_SET(
+ 0, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(10),
+ WASM_RTT_CANON(array32_index))),
+ WASM_LOCAL_SET(
+ 1, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(10),
+ WASM_RTT_CANON(array32_index))),
WASM_ARRAY_COPY(array32_index, array32_index, WASM_LOCAL_GET(1),
WASM_I32V(6), WASM_LOCAL_GET(0), WASM_I32V(8),
WASM_I32V(4)),
@@ -1018,15 +1100,30 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
const byte kOobDestination = tester.DefineFunction(
tester.sigs.v_v(), {optref(array32_index), optref(array32_index)},
- {WASM_LOCAL_SET(0, WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(10),
- WASM_RTT_CANON(array32_index))),
- WASM_LOCAL_SET(1, WASM_ARRAY_NEW_DEFAULT(array32_index, WASM_I32V(10),
- WASM_RTT_CANON(array32_index))),
+ {WASM_LOCAL_SET(
+ 0, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(10),
+ WASM_RTT_CANON(array32_index))),
+ WASM_LOCAL_SET(
+ 1, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array32_index, WASM_I32V(10),
+ WASM_RTT_CANON(array32_index))),
WASM_ARRAY_COPY(array32_index, array32_index, WASM_LOCAL_GET(1),
WASM_I32V(6), WASM_LOCAL_GET(0), WASM_I32V(3),
WASM_I32V(5)),
kExprEnd});
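+  // A zero-length array.copy is valid and must not trap.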
+ const byte kZeroLength = tester.DefineFunction(
+ tester.sigs.i_v(), {optref(arrayref_index), optref(arrayref_index)},
+ {WASM_LOCAL_SET(
+ 0, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(arrayref_index, WASM_I32V(10),
+ WASM_RTT_CANON(arrayref_index))),
+ WASM_LOCAL_SET(
+ 1, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(arrayref_index, WASM_I32V(10),
+ WASM_RTT_CANON(arrayref_index))),
+ WASM_ARRAY_COPY(arrayref_index, arrayref_index, WASM_LOCAL_GET(1),
+ WASM_I32V(6), WASM_LOCAL_GET(0), WASM_I32V(3),
+ WASM_I32V(0)),
+ WASM_I32V(0), kExprEnd});
+
tester.CompileModule();
tester.CheckResult(kCopyI32, 0, 5);
@@ -1069,6 +1166,7 @@ WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
tester.CheckHasThrown(kOobSource);
tester.CheckHasThrown(kOobDestination);
+ tester.CheckResult(kZeroLength, 0); // Does not throw.
}
WASM_COMPILED_EXEC_TEST(NewDefault) {
@@ -1079,8 +1177,8 @@ WASM_COMPILED_EXEC_TEST(NewDefault) {
// Returns: struct[0] + f64_to_i32(struct[1]) + (struct[2].is_null ^ 1) == 0.
const byte allocate_struct = tester.DefineFunction(
tester.sigs.i_v(), {optref(struct_type)},
- {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT(struct_type,
- WASM_RTT_CANON(struct_type))),
+ {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ struct_type, WASM_RTT_CANON(struct_type))),
WASM_I32_ADD(
WASM_I32_ADD(WASM_STRUCT_GET(struct_type, 0, WASM_LOCAL_GET(0)),
WASM_I32_SCONVERT_F64(WASM_STRUCT_GET(
@@ -1091,8 +1189,9 @@ WASM_COMPILED_EXEC_TEST(NewDefault) {
kExprEnd});
const byte allocate_array = tester.DefineFunction(
tester.sigs.i_v(), {optref(array_type)},
- {WASM_LOCAL_SET(0, WASM_ARRAY_NEW_DEFAULT(array_type, WASM_I32V(2),
- WASM_RTT_CANON(array_type))),
+ {WASM_LOCAL_SET(
+ 0, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_type, WASM_I32V(2),
+ WASM_RTT_CANON(array_type))),
WASM_I32_ADD(
WASM_ARRAY_GET(array_type, WASM_LOCAL_GET(0), WASM_I32V(0)),
WASM_ARRAY_GET(array_type, WASM_LOCAL_GET(0), WASM_I32V(1))),
@@ -1227,7 +1326,7 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
const byte kRefTestUpcast = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_REF_TEST(
- WASM_STRUCT_NEW_DEFAULT(
+ WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
subtype_index,
WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
WASM_RTT_CANON(type_index)),
@@ -1239,7 +1338,7 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
const byte kRefTestUnrelated = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_REF_TEST(
- WASM_STRUCT_NEW_DEFAULT(
+ WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
subtype_index,
WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
WASM_RTT_CANON(sig_index)),
@@ -1250,9 +1349,9 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
kExprEnd});
const byte kRefTestUnrelatedNonNullable = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_TEST(
- WASM_STRUCT_NEW_DEFAULT(type_index, WASM_RTT_CANON(type_index)),
- WASM_RTT_CANON(sig_index)),
+ {WASM_REF_TEST(WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ type_index, WASM_RTT_CANON(type_index)),
+ WASM_RTT_CANON(sig_index)),
kExprEnd});
const byte kRefCastNull = tester.DefineFunction(
@@ -1263,7 +1362,7 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
const byte kRefCastUpcast = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_REF_IS_NULL(WASM_REF_CAST(
- WASM_STRUCT_NEW_DEFAULT(
+ WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
subtype_index,
WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
WASM_RTT_CANON(type_index))),
@@ -1276,7 +1375,7 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
const byte kRefCastUnrelated = tester.DefineFunction(
tester.sigs.i_v(), {},
{WASM_REF_IS_NULL(WASM_REF_CAST(
- WASM_STRUCT_NEW_DEFAULT(
+ WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
subtype_index,
WASM_RTT_SUB(subtype_index, WASM_RTT_CANON(type_index))),
WASM_RTT_CANON(sig_index))),
@@ -1286,11 +1385,95 @@ WASM_COMPILED_EXEC_TEST(RefTrivialCasts) {
{WASM_REF_IS_NULL(WASM_REF_CAST(WASM_REF_NULL(subtype_index),
WASM_RTT_CANON(sig_index))),
kExprEnd});
+ const byte kRefCastUnrelatedNonNullable =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST(
+ WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ type_index, WASM_RTT_CANON(type_index)),
+ WASM_RTT_CANON(sig_index))),
+ kExprEnd});
+
+ tester.CompileModule();
+
+ tester.CheckResult(kRefTestNull, 0);
+ tester.CheckResult(kRefTestUpcast, 1);
+ tester.CheckResult(kRefTestUpcastNull, 0);
+ tester.CheckResult(kRefTestUnrelated, 0);
+ tester.CheckResult(kRefTestUnrelatedNull, 0);
+ tester.CheckResult(kRefTestUnrelatedNonNullable, 0);
+
+ tester.CheckResult(kRefCastNull, 1);
+ tester.CheckResult(kRefCastUpcast, 0);
+ tester.CheckResult(kRefCastUpcastNull, 1);
+ tester.CheckHasThrown(kRefCastUnrelated);
+ tester.CheckResult(kRefCastUnrelatedNull, 1);
+ tester.CheckHasThrown(kRefCastUnrelatedNonNullable);
+}
+
+WASM_COMPILED_EXEC_TEST(RefTrivialCastsStatic) {
+ WasmGCTester tester(execution_tier);
+ byte type_index =
+ tester.DefineStruct({F(wasm::kWasmI32, true)}, kGenericSuperType);
+ byte subtype_index = tester.DefineStruct(
+ {F(wasm::kWasmI32, true), F(wasm::kWasmS128, false)}, type_index);
+ ValueType sig_types[] = {kWasmS128, kWasmI32, kWasmF64};
+ FunctionSig sig(1, 2, sig_types);
+ byte sig_index = tester.DefineSignature(&sig);
+
+ const byte kRefTestNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST_STATIC(WASM_REF_NULL(type_index), subtype_index),
+ kExprEnd});
+ const byte kRefTestUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST_STATIC(WASM_STRUCT_NEW_DEFAULT(subtype_index), type_index),
+ kExprEnd});
+ const byte kRefTestUpcastNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST_STATIC(WASM_REF_NULL(subtype_index), type_index),
+ kExprEnd});
+ const byte kRefTestUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST_STATIC(WASM_STRUCT_NEW_DEFAULT(subtype_index), sig_index),
+ kExprEnd});
+ const byte kRefTestUnrelatedNull = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST_STATIC(WASM_REF_NULL(subtype_index), sig_index),
+ kExprEnd});
+ const byte kRefTestUnrelatedNonNullable = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_TEST_STATIC(WASM_STRUCT_NEW_DEFAULT(type_index), sig_index),
+ kExprEnd});
+
+ const byte kRefCastNull =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST_STATIC(
+ WASM_REF_NULL(type_index), subtype_index)),
+ kExprEnd});
+ const byte kRefCastUpcast = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST_STATIC(
+ WASM_STRUCT_NEW_DEFAULT(subtype_index), type_index)),
+ kExprEnd});
+ const byte kRefCastUpcastNull =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST_STATIC(
+ WASM_REF_NULL(subtype_index), type_index)),
+ kExprEnd});
+ const byte kRefCastUnrelated = tester.DefineFunction(
+ tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST_STATIC(
+ WASM_STRUCT_NEW_DEFAULT(subtype_index), sig_index)),
+ kExprEnd});
+ const byte kRefCastUnrelatedNull =
+ tester.DefineFunction(tester.sigs.i_v(), {},
+ {WASM_REF_IS_NULL(WASM_REF_CAST_STATIC(
+ WASM_REF_NULL(subtype_index), sig_index)),
+ kExprEnd});
const byte kRefCastUnrelatedNonNullable = tester.DefineFunction(
tester.sigs.i_v(), {},
- {WASM_REF_IS_NULL(WASM_REF_CAST(
- WASM_STRUCT_NEW_DEFAULT(type_index, WASM_RTT_CANON(type_index)),
- WASM_RTT_CANON(sig_index))),
+ {WASM_REF_IS_NULL(WASM_REF_CAST_STATIC(
+ WASM_STRUCT_NEW_DEFAULT(type_index), sig_index)),
kExprEnd});
tester.CompileModule();
@@ -1404,6 +1587,9 @@ WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
{WASM_ARRAY_NEW_WITH_RTT(type_index, WASM_I32V(10), WASM_I32V(42),
WASM_RTT_CANON(type_index)),
kExprEnd});
+ const byte array_new_nominal = tester.DefineFunction(
+ &sig, {},
+ {WASM_ARRAY_NEW(type_index, WASM_I32V(10), WASM_I32V(42)), kExprEnd});
ValueType rtt_type = ValueType::Rtt(type_index, 0);
FunctionSig rtt_canon_sig(1, 0, &rtt_type);
@@ -1418,6 +1604,10 @@ WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
tester.GetResultObject(array_new_with_rtt).ToHandleChecked();
CHECK(result->IsWasmArray());
CHECK_EQ(Handle<WasmArray>::cast(result)->map(), *map);
+
+ result = tester.GetResultObject(array_new_nominal).ToHandleChecked();
+ CHECK(result->IsWasmArray());
+ CHECK_EQ(Handle<WasmArray>::cast(result)->map(), *map);
}
WASM_COMPILED_EXEC_TEST(FunctionRefs) {
@@ -1593,18 +1783,18 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
{WASM_LOCAL_SET(0, WASM_SEQ(value)), \
WASM_REF_IS_##type(WASM_LOCAL_GET(0)), kExprEnd})
- byte kDataCheckSuccess =
- TYPE_CHECK(DATA, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ byte kDataCheckSuccess = TYPE_CHECK(
+ DATA, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
byte kDataCheckFailure = TYPE_CHECK(DATA, WASM_I31_NEW(WASM_I32V(42)));
byte kFuncCheckSuccess = TYPE_CHECK(FUNC, WASM_REF_FUNC(function_index));
- byte kFuncCheckFailure =
- TYPE_CHECK(FUNC, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ byte kFuncCheckFailure = TYPE_CHECK(
+ FUNC, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
byte kI31CheckSuccess = TYPE_CHECK(I31, WASM_I31_NEW(WASM_I32V(42)));
- byte kI31CheckFailure =
- TYPE_CHECK(I31, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ byte kI31CheckFailure = TYPE_CHECK(
+ I31, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
#undef TYPE_CHECK
#define TYPE_CAST(type, value) \
@@ -1613,18 +1803,18 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
WASM_REF_AS_##type(WASM_LOCAL_GET(0)), WASM_DROP, \
WASM_I32V(1), kExprEnd})
- byte kDataCastSuccess =
- TYPE_CAST(DATA, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ byte kDataCastSuccess = TYPE_CAST(
+ DATA, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
byte kDataCastFailure = TYPE_CAST(DATA, WASM_I31_NEW(WASM_I32V(42)));
byte kFuncCastSuccess = TYPE_CAST(FUNC, WASM_REF_FUNC(function_index));
- byte kFuncCastFailure =
- TYPE_CAST(FUNC, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
- byte kI31CastSuccess = TYPE_CAST(I31, WASM_I31_NEW(WASM_I32V(42)));
- byte kI31CastFailure =
- TYPE_CAST(I31, WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
+ byte kFuncCastFailure = TYPE_CAST(
+ FUNC, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
WASM_RTT_CANON(array_index)));
+ byte kI31CastSuccess = TYPE_CAST(I31, WASM_I31_NEW(WASM_I32V(42)));
+ byte kI31CastFailure = TYPE_CAST(
+ I31, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
#undef TYPE_CAST
// If the branch is not taken, we return 0. If it is taken, then the respective
@@ -1640,16 +1830,16 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
byte kBrOnDataTaken =
BR_ON(DATA, Data,
- WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
byte kBrOnDataNotTaken = BR_ON(DATA, Data, WASM_REF_FUNC(function_index));
byte kBrOnFuncTaken = BR_ON(FUNC, Func, WASM_REF_FUNC(function_index));
byte kBrOnFuncNotTaken = BR_ON(FUNC, Func, WASM_I31_NEW(WASM_I32V(42)));
byte kBrOnI31Taken = BR_ON(I31, I31, WASM_I31_NEW(WASM_I32V(42)));
byte kBrOnI31NotTaken =
BR_ON(I31, I31,
- WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
#undef BR_ON
// If the branch is not taken, we return 1. If it is taken, then the respective
@@ -1665,8 +1855,8 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
byte kBrOnNonDataNotTaken =
BR_ON_NON(DATA, Data,
- WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
byte kBrOnNonDataTaken = BR_ON_NON(DATA, Data, WASM_REF_FUNC(function_index));
byte kBrOnNonFuncNotTaken =
BR_ON_NON(FUNC, Func, WASM_REF_FUNC(function_index));
@@ -1674,8 +1864,8 @@ WASM_COMPILED_EXEC_TEST(AbstractTypeChecks) {
byte kBrOnNonI31NotTaken = BR_ON_NON(I31, I31, WASM_I31_NEW(WASM_I32V(42)));
byte kBrOnNonI31Taken =
BR_ON_NON(I31, I31,
- WASM_ARRAY_NEW_DEFAULT(array_index, WASM_I32V(10),
- WASM_RTT_CANON(array_index)));
+ WASM_ARRAY_NEW_DEFAULT_WITH_RTT(array_index, WASM_I32V(10),
+ WASM_RTT_CANON(array_index)));
#undef BR_ON_NON
tester.CompileModule();
@@ -1769,9 +1959,9 @@ WASM_COMPILED_EXEC_TEST(CastsBenchmark) {
const byte Prepare = tester.DefineFunction(
tester.sigs.i_v(), {wasm::kWasmI32},
{// List = new eqref[kListLength];
- WASM_GLOBAL_SET(List,
- WASM_ARRAY_NEW_DEFAULT(ListType, WASM_I32V(kListLength),
- WASM_GLOBAL_GET(RttList))),
+ WASM_GLOBAL_SET(List, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(
+ ListType, WASM_I32V(kListLength),
+ WASM_GLOBAL_GET(RttList))),
// for (int i = 0; i < kListLength; ) {
// List[i] = new Super(i);
// i++;
diff --git a/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc b/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc
index ae168efda8..3843089dd8 100644
--- a/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc
+++ b/deps/v8/test/cctest/wasm/test-liftoff-for-fuzzing.cc
@@ -40,7 +40,7 @@ TEST(NondeterminismUnopF64) {
CHECK(r.HasNondeterminism());
}
-TEST(NondeterminismUnopF32x4) {
+TEST(NondeterminismUnopF32x4AllNaN) {
WasmRunner<int32_t, float> r(TestExecutionTier::kLiftoffForFuzzing);
byte value = 0;
@@ -55,7 +55,21 @@ TEST(NondeterminismUnopF32x4) {
CHECK(r.HasNondeterminism());
}
-TEST(NondeterminismUnopF64x2) {
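+// A NaN in a single replaced lane must also be flagged as nondeterminism.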
+TEST(NondeterminismUnopF32x4OneNaN) {
+ for (byte lane = 0; lane < 4; ++lane) {
+ WasmRunner<int32_t, float> r(TestExecutionTier::kLiftoffForFuzzing);
+ BUILD(r, WASM_SIMD_F32x4_SPLAT(WASM_F32(0)), WASM_LOCAL_GET(0),
+ WASM_SIMD_OP(kExprF32x4ReplaceLane), lane,
+ WASM_SIMD_OP(kExprF32x4Ceil), kExprDrop, WASM_ONE);
+ CHECK(!r.HasNondeterminism());
+ r.CheckCallViaJS(1, 0.0);
+ CHECK(!r.HasNondeterminism());
+ r.CheckCallViaJS(1, std::nanf(""));
+ CHECK(r.HasNondeterminism());
+ }
+}
+
+TEST(NondeterminismUnopF64x2AllNaN) {
WasmRunner<int32_t, double> r(TestExecutionTier::kLiftoffForFuzzing);
byte value = 0;
@@ -70,6 +84,20 @@ TEST(NondeterminismUnopF64x2) {
CHECK(r.HasNondeterminism());
}
+TEST(NondeterminismUnopF64x2OneNaN) {
+ for (byte lane = 0; lane < 2; ++lane) {
+ WasmRunner<int32_t, double> r(TestExecutionTier::kLiftoffForFuzzing);
+ BUILD(r, WASM_SIMD_F64x2_SPLAT(WASM_F64(0)), WASM_LOCAL_GET(0),
+ WASM_SIMD_OP(kExprF64x2ReplaceLane), lane,
+ WASM_SIMD_OP(kExprF64x2Ceil), kExprDrop, WASM_ONE);
+ CHECK(!r.HasNondeterminism());
+ r.CheckCallViaJS(1, 0.0);
+ CHECK(!r.HasNondeterminism());
+ r.CheckCallViaJS(1, std::nan(""));
+ CHECK(r.HasNondeterminism());
+ }
+}
+
TEST(NondeterminismBinop) {
WasmRunner<float> r(TestExecutionTier::kLiftoffForFuzzing);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 74a07ab620..510f446f84 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -45,7 +45,7 @@ WASM_EXEC_TEST(I64Const_many) {
WASM_EXEC_TEST(Return_I64) {
WasmRunner<int64_t, int64_t> r(execution_tier);
- BUILD(r, WASM_RETURN1(WASM_LOCAL_GET(0)));
+ BUILD(r, WASM_RETURN(WASM_LOCAL_GET(0)));
FOR_INT64_INPUTS(i) { CHECK_EQ(i, r.Call(i)); }
}
@@ -67,9 +67,10 @@ const int64_t kHasBit33On = 0x100000000;
WASM_EXEC_TEST(Regress5800_Add) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_ADD(
- WASM_I64V(0), WASM_I64V(kHasBit33On)))),
- WASM_RETURN1(WASM_I32V(0))),
+ BUILD(r,
+ WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_ADD(
+ WASM_I64V(0), WASM_I64V(kHasBit33On)))),
+ WASM_RETURN(WASM_I32V(0))),
WASM_I32V(0));
CHECK_EQ(0, r.Call());
}
@@ -86,9 +87,10 @@ WASM_EXEC_TEST(I64Sub) {
WASM_EXEC_TEST(Regress5800_Sub) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_SUB(
- WASM_I64V(0), WASM_I64V(kHasBit33On)))),
- WASM_RETURN1(WASM_I32V(0))),
+ BUILD(r,
+ WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_SUB(
+ WASM_I64V(0), WASM_I64V(kHasBit33On)))),
+ WASM_RETURN(WASM_I32V(0))),
WASM_I32V(0));
CHECK_EQ(0, r.Call());
}
@@ -1479,12 +1481,6 @@ WASM_EXEC_TEST(UnalignedInt64Store) {
r.Call();
}
-#define ADD_CODE(vec, ...) \
- do { \
- byte __buf[] = {__VA_ARGS__}; \
- for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
- } while (false)
-
static void CompileCallIndirectMany(TestExecutionTier tier, ValueType param) {
// Make sure we don't run out of registers when compiling indirect calls
// with many many parameters.
@@ -1543,8 +1539,8 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
for (int i = 0; i < num_params; i++) {
b.AddParam(ValueType::For(memtypes[i]));
}
- WasmFunctionCompiler& t = r.NewFunction(b.Build());
- BUILD(t, WASM_LOCAL_GET(which));
+ WasmFunctionCompiler& f = r.NewFunction(b.Build());
+ BUILD(f, WASM_LOCAL_GET(which));
// =========================================================================
// Build the calling function.
@@ -1558,7 +1554,7 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
}
// Call the selector function.
- ADD_CODE(code, WASM_CALL_FUNCTION0(t.function_index()));
+ ADD_CODE(code, WASM_CALL_FUNCTION0(f.function_index()));
// Store the result in a local.
byte local_index = r.AllocateLocal(ValueType::For(result));
@@ -1582,8 +1578,8 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
for (int i = 0; i < size; i++) {
int base = (which + 1) * kElemSize;
byte expected = r.builder().raw_mem_at<byte>(base + i);
- byte result = r.builder().raw_mem_at<byte>(i);
- CHECK_EQ(expected, result);
+ byte actual = r.builder().raw_mem_at<byte>(i);
+ CHECK_EQ(expected, actual);
}
}
}
@@ -1617,8 +1613,6 @@ WASM_EXEC_TEST(Regression_6858) {
CHECK_TRAP64(r.Call(dividend, divisor, filler, filler));
}
-#undef ADD_CODE
-
// clang-format gets confused about these closing parentheses (wants to change
// the first comment to "// namespace v8". Disable it.
// clang-format off
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 4b6b28d3f2..6ca2153365 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -38,7 +38,7 @@ TEST(Run_WasmIfElse) {
TEST(Run_WasmIfReturn) {
WasmRunner<int32_t, int32_t> r(TestExecutionTier::kInterpreter);
- BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_RETURN1(WASM_I32V_2(77))),
+ BUILD(r, WASM_IF(WASM_LOCAL_GET(0), WASM_RETURN(WASM_I32V_2(77))),
WASM_I32V_2(65));
CHECK_EQ(65, r.Call(0));
CHECK_EQ(77, r.Call(1));
@@ -511,6 +511,38 @@ TEST(Regress1247119) {
r.Call();
}
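+// An exception thrown inside a nested try with no handler must propagate to
+// the enclosing catch_all.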
+TEST(Regress1246712) {
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ TestSignatures sigs;
+ const int kExpected = 1;
+ uint8_t except = r.builder().AddException(sigs.v_v());
+ BUILD(r, kExprTry, kWasmI32.value_type_code(), kExprTry,
+ kWasmI32.value_type_code(), kExprThrow, except, kExprEnd, kExprCatchAll,
+ kExprI32Const, kExpected, kExprEnd);
+ CHECK_EQ(kExpected, r.Call());
+}
+
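+// A try/delegate nested inside a catch_all block must decode and execute
+// without crashing.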
+TEST(Regress1249306) {
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ BUILD(r, kExprTry, kVoid, kExprCatchAll, kExprTry, kVoid, kExprDelegate, 0,
+ kExprEnd, kExprI32Const, 0);
+ r.Call();
+}
+
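+// Deeply nested try blocks with a multi-value block signature must validate
+// and run in the interpreter.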
+TEST(Regress1251845) {
+ WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(
+ TestExecutionTier::kInterpreter);
+ ValueType reps[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig sig_iii_i(1, 3, reps);
+ byte sig = r.builder().AddSignature(&sig_iii_i);
+ BUILD(r, kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 0, kExprTry, sig,
+ kExprI32Const, 0, kExprTry, 0, kExprTry, 0, kExprI32Const, 0, kExprTry,
+ sig, kExprUnreachable, kExprTry, 0, kExprUnreachable, kExprEnd,
+ kExprTry, sig, kExprUnreachable, kExprEnd, kExprEnd, kExprUnreachable,
+ kExprEnd, kExprEnd, kExprUnreachable, kExprEnd);
+ r.Call(0, 0, 0);
+}
+
} // namespace test_run_wasm_interpreter
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 99b539bbc5..88bc0d70a2 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -21,12 +21,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define ADD_CODE(vec, ...) \
- do { \
- byte __buf[] = {__VA_ARGS__}; \
- for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
- } while (false)
-
namespace {
// A helper for generating predictable but unique argument values that
// are easy to debug (e.g. with misaligned stacks).
@@ -571,8 +565,6 @@ WASM_COMPILED_EXEC_TEST(Run_ReturnCallIndirectImportedFunction) {
RunPickerTest(execution_tier, true);
}
-#undef ADD_CODE
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index e4dae907f2..81efe93eb6 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -435,7 +435,7 @@ TEST(Run_WasmModule_Global) {
ExportAsMain(f2);
byte code2[] = {WASM_GLOBAL_SET(global1, WASM_I32V_1(56)),
WASM_GLOBAL_SET(global2, WASM_I32V_1(41)),
- WASM_RETURN1(WASM_CALL_FUNCTION0(f1->func_index()))};
+ WASM_RETURN(WASM_CALL_FUNCTION0(f1->func_index()))};
EMIT_CODE_WITH_END(f2, code2);
TestModule(&zone, builder, 97);
}
@@ -555,12 +555,12 @@ TEST(TestInterruptLoop) {
ExportAsMain(f);
byte code[] = {
WASM_LOOP(
- WASM_IFB(WASM_NOT(WASM_LOAD_MEM(
- MachineType::Int32(),
- WASM_I32V(InterruptThread::interrupt_location_ * 4))),
- WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
- WASM_I32V(InterruptThread::signal_value_)),
- WASM_BR(1))),
+ WASM_IF(WASM_NOT(WASM_LOAD_MEM(
+ MachineType::Int32(),
+ WASM_I32V(InterruptThread::interrupt_location_ * 4))),
+ WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ WASM_I32V(InterruptThread::signal_value_)),
+ WASM_BR(1))),
WASM_I32V(121)};
EMIT_CODE_WITH_END(f, code);
ZoneBuffer buffer(&zone);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 3ba26da89c..c3e36c2068 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -250,14 +250,14 @@ T Abs(T a) {
WASM_IF(WASM_##LANE_TYPE##_NE(WASM_LOCAL_GET(lane_value), \
WASM_SIMD_##TYPE##_EXTRACT_LANE( \
lane_index, WASM_LOCAL_GET(value))), \
- WASM_RETURN1(WASM_ZERO))
+ WASM_RETURN(WASM_ZERO))
// Unsigned Extracts are only available for I8x16, I16x8 types
#define WASM_SIMD_CHECK_LANE_U(TYPE, value, LANE_TYPE, lane_value, lane_index) \
WASM_IF(WASM_##LANE_TYPE##_NE(WASM_LOCAL_GET(lane_value), \
WASM_SIMD_##TYPE##_EXTRACT_LANE_U( \
lane_index, WASM_LOCAL_GET(value))), \
- WASM_RETURN1(WASM_ZERO))
+ WASM_RETURN(WASM_ZERO))
WASM_SIMD_TEST(S128Globals) {
WasmRunner<int32_t> r(execution_tier);
@@ -351,6 +351,49 @@ WASM_SIMD_TEST(F32x4ConvertI32x4) {
}
}
+template <typename FloatType, typename ScalarType>
+void RunF128CompareOpConstImmTest(
+ TestExecutionTier execution_tier, WasmOpcode cmp_opcode,
+ WasmOpcode splat_opcode, ScalarType (*expected_op)(FloatType, FloatType)) {
+ for (FloatType x : compiler::ValueHelper::GetVector<FloatType>()) {
+ if (!PlatformCanRepresent(x)) continue;
+ WasmRunner<int32_t, FloatType> r(execution_tier);
+ // Set up globals to hold mask output for left and right cases
+ ScalarType* g1 = r.builder().template AddGlobal<ScalarType>(kWasmS128);
+ ScalarType* g2 = r.builder().template AddGlobal<ScalarType>(kWasmS128);
+ // Build fn to splat test values, perform compare op on both sides, and
+ // write the result.
+ byte value = 0;
+ byte temp = r.AllocateLocal(kWasmS128);
+ uint8_t const_buffer[kSimd128Size];
+ for (size_t i = 0; i < kSimd128Size / sizeof(FloatType); i++) {
+ WriteLittleEndianValue<FloatType>(
+ bit_cast<FloatType*>(&const_buffer[0]) + i, x);
+ }
+ BUILD(r,
+ WASM_LOCAL_SET(temp,
+ WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(cmp_opcode, WASM_SIMD_CONSTANT(const_buffer),
+ WASM_LOCAL_GET(temp))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(cmp_opcode, WASM_LOCAL_GET(temp),
+ WASM_SIMD_CONSTANT(const_buffer))),
+ WASM_ONE);
+ for (FloatType y : compiler::ValueHelper::GetVector<FloatType>()) {
+ if (!PlatformCanRepresent(y)) continue;
+ FloatType diff = x - y; // Model comparison as subtraction.
+ if (!PlatformCanRepresent(diff)) continue;
+ r.Call(y);
+ ScalarType expected1 = expected_op(x, y);
+ ScalarType expected2 = expected_op(y, x);
+ for (size_t i = 0; i < kSimd128Size / sizeof(ScalarType); i++) {
+ CHECK_EQ(expected1, LANE(g1, i));
+ CHECK_EQ(expected2, LANE(g2, i));
+ }
+ }
+ }
+}
+
WASM_SIMD_TEST(F32x4Abs) {
RunF32x4UnOpTest(execution_tier, kExprF32x4Abs, std::abs);
}
@@ -470,6 +513,36 @@ void RunShiftAddTestSequence(TestExecutionTier execution_tier,
}
}
+WASM_SIMD_TEST(F32x4EqZero) {
+ RunF128CompareOpConstImmTest<float, int32_t>(execution_tier, kExprF32x4Eq,
+ kExprF32x4Splat, Equal);
+}
+
+WASM_SIMD_TEST(F32x4NeZero) {
+ RunF128CompareOpConstImmTest<float, int32_t>(execution_tier, kExprF32x4Ne,
+ kExprF32x4Splat, NotEqual);
+}
+
+WASM_SIMD_TEST(F32x4GtZero) {
+ RunF128CompareOpConstImmTest<float, int32_t>(execution_tier, kExprF32x4Gt,
+ kExprF32x4Splat, Greater);
+}
+
+WASM_SIMD_TEST(F32x4GeZero) {
+ RunF128CompareOpConstImmTest<float, int32_t>(execution_tier, kExprF32x4Ge,
+ kExprF32x4Splat, GreaterEqual);
+}
+
+WASM_SIMD_TEST(F32x4LtZero) {
+ RunF128CompareOpConstImmTest<float, int32_t>(execution_tier, kExprF32x4Lt,
+ kExprF32x4Splat, Less);
+}
+
+WASM_SIMD_TEST(F32x4LeZero) {
+ RunF128CompareOpConstImmTest<float, int32_t>(execution_tier, kExprF32x4Le,
+ kExprF32x4Splat, LessEqual);
+}
+
WASM_SIMD_TEST(I64x2Splat) {
WasmRunner<int32_t, int64_t> r(execution_tier);
// Set up a global to hold output vector.
@@ -810,6 +883,51 @@ WASM_SIMD_TEST(F64x2PromoteLowF32x4) {
}
}
+// Test F64x2PromoteLowF32x4 with S128Load64Zero optimization (only on some
+// architectures). These 2 opcodes should be fused into a single instruction
+// with memory operands, which is tested in instruction-selector tests. This
+// test checks that we get correct results.
+WASM_SIMD_TEST(F64x2PromoteLowF32x4WithS128Load64Zero) {
+ {
+ WasmRunner<int32_t> r(execution_tier);
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ float* memory =
+ r.builder().AddMemoryElems<float>(kWasmPageSize / sizeof(float));
+ r.builder().RandomizeMemory();
+ r.builder().WriteMemory(&memory[0], 1.0f);
+ r.builder().WriteMemory(&memory[1], 3.0f);
+ r.builder().WriteMemory(&memory[2], 5.0f);
+ r.builder().WriteMemory(&memory[3], 8.0f);
+
+ // Load at 4 (index) + 4 (offset) bytes, which is 2 floats.
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
+ WASM_SIMD_LOAD_OP_OFFSET(kExprS128Load64Zero,
+ WASM_I32V(4), 4))),
+ WASM_ONE);
+
+ r.Call();
+ CHECK_EQ(5.0f, LANE(g, 0));
+ CHECK_EQ(8.0f, LANE(g, 1));
+ }
+
+ {
+ // OOB tests.
+ WasmRunner<int32_t> r(execution_tier);
+ r.builder().AddGlobal<double>(kWasmS128);
+ r.builder().AddMemoryElems<float>(kWasmPageSize / sizeof(float));
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_UNOP(kExprF64x2PromoteLowF32x4,
+ WASM_SIMD_LOAD_OP(kExprS128Load64Zero,
+ WASM_I32V(kWasmPageSize)))),
+ WASM_ONE);
+
+ CHECK_TRAP(r.Call());
+ }
+}
+
WASM_SIMD_TEST(F64x2Add) {
RunF64x2BinOpTest(execution_tier, kExprF64x2Add, Add);
}
@@ -858,6 +976,36 @@ WASM_SIMD_TEST(F64x2Le) {
RunF64x2CompareOpTest(execution_tier, kExprF64x2Le, LessEqual);
}
+WASM_SIMD_TEST(F64x2EqZero) {
+ RunF128CompareOpConstImmTest<double, int64_t>(execution_tier, kExprF64x2Eq,
+ kExprF64x2Splat, Equal);
+}
+
+WASM_SIMD_TEST(F64x2NeZero) {
+ RunF128CompareOpConstImmTest<double, int64_t>(execution_tier, kExprF64x2Ne,
+ kExprF64x2Splat, NotEqual);
+}
+
+WASM_SIMD_TEST(F64x2GtZero) {
+ RunF128CompareOpConstImmTest<double, int64_t>(execution_tier, kExprF64x2Gt,
+ kExprF64x2Splat, Greater);
+}
+
+WASM_SIMD_TEST(F64x2GeZero) {
+ RunF128CompareOpConstImmTest<double, int64_t>(execution_tier, kExprF64x2Ge,
+ kExprF64x2Splat, GreaterEqual);
+}
+
+WASM_SIMD_TEST(F64x2LtZero) {
+ RunF128CompareOpConstImmTest<double, int64_t>(execution_tier, kExprF64x2Lt,
+ kExprF64x2Splat, Less);
+}
+
+WASM_SIMD_TEST(F64x2LeZero) {
+ RunF128CompareOpConstImmTest<double, int64_t>(execution_tier, kExprF64x2Le,
+ kExprF64x2Splat, LessEqual);
+}
+
WASM_SIMD_TEST(F64x2Min) {
RunF64x2BinOpTest(execution_tier, kExprF64x2Min, JSMin);
}
@@ -1263,8 +1411,8 @@ void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
for (auto i = v.begin(), j = v.end() - 1; i < v.end(); i++, j--) {
r.Call(*i, *j);
Wide expected = AddLong<Wide>(*i, *j);
- for (int i = 0; i < num_lanes; i++) {
- CHECK_EQ(expected, LANE(g, i));
+ for (int l = 0; l < num_lanes; l++) {
+ CHECK_EQ(expected, LANE(g, l));
}
}
}
@@ -2299,27 +2447,29 @@ WASM_SIMD_TEST(I8x16Swizzle) {
// [0-15] and [16-31]. Using [0-15] as the indices will not sufficiently test
   // swizzle since the expected result is a no-op, while using [16-31] will
   // result in all 0s.
- WasmRunner<int32_t> r(execution_tier);
- static const int kElems = kSimd128Size / sizeof(uint8_t);
- uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
- uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
- uint8_t* src1 = r.builder().AddGlobal<uint8_t>(kWasmS128);
- BUILD(
- r,
- WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
- WASM_GLOBAL_GET(2))),
- WASM_ONE);
+ {
+ WasmRunner<int32_t> r(execution_tier);
+ static const int kElems = kSimd128Size / sizeof(uint8_t);
+ uint8_t* dst = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ uint8_t* src0 = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ uint8_t* src1 = r.builder().AddGlobal<uint8_t>(kWasmS128);
+ BUILD(r,
+ WASM_GLOBAL_SET(0,
+ WASM_SIMD_BINOP(kExprI8x16Swizzle, WASM_GLOBAL_GET(1),
+ WASM_GLOBAL_GET(2))),
+ WASM_ONE);
- for (SwizzleTestArgs si : swizzle_test_vector) {
- for (int i = 0; i < kElems; i++) {
- LANE(src0, i) = si.input[i];
- LANE(src1, i) = si.indices[i];
- }
+ for (SwizzleTestArgs si : swizzle_test_vector) {
+ for (int i = 0; i < kElems; i++) {
+ LANE(src0, i) = si.input[i];
+ LANE(src1, i) = si.indices[i];
+ }
- CHECK_EQ(1, r.Call());
+ CHECK_EQ(1, r.Call());
- for (int i = 0; i < kElems; i++) {
- CHECK_EQ(LANE(dst, i), si.expected[i]);
+ for (int i = 0; i < kElems; i++) {
+ CHECK_EQ(LANE(dst, i), si.expected[i]);
+ }
}
}
@@ -2448,8 +2598,8 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
// Run the SIMD or scalar lowered compiled code and compare results.
std::array<int8_t, kSimd128Size> result;
RunWasmCode(execution_tier, buffer, &result);
- for (size_t i = 0; i < kSimd128Size; ++i) {
- CHECK_EQ(result[i], expected[i]);
+ for (size_t j = 0; j < kSimd128Size; ++j) {
+ CHECK_EQ(result[j], expected[j]);
}
}
}
@@ -2471,28 +2621,28 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_LOCAL_SET( \
reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_LOCAL_SET( \
reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_LOCAL_SET( \
reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(zero), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_LOCAL_SET(one_one, \
WASM_SIMD_I##format##_REPLACE_LANE( \
lanes - 1, WASM_LOCAL_GET(zero), int_type(1))), \
@@ -2502,28 +2652,28 @@ WASM_SIMD_TEST(S8x16MultiShuffleFuzz) {
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_LOCAL_SET( \
reduced, WASM_SIMD_UNOP(kExprV128AnyTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_LOCAL_SET( \
reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Eq, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_LOCAL_SET( \
reduced, WASM_SIMD_UNOP(kExprI##format##AllTrue, \
WASM_SIMD_BINOP(kExprI##format##Ne, \
WASM_LOCAL_GET(one_one), \
WASM_LOCAL_GET(zero)))), \
WASM_IF(WASM_I32_NE(WASM_LOCAL_GET(reduced), WASM_ZERO), \
- WASM_RETURN1(WASM_ZERO)), \
+ WASM_RETURN(WASM_ZERO)), \
WASM_ONE); \
CHECK_EQ(1, r.Call()); \
}
@@ -2785,18 +2935,21 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
}
WASM_SIMD_TEST(SimdLoadStoreLoad) {
- WasmRunner<int32_t> r(execution_tier);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- // Load memory, store it, then reload it and extract the first lane. Use a
- // non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
- BUILD(r, WASM_SIMD_STORE_MEM(WASM_I32V(8), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
- WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(8))));
+ {
+ WasmRunner<int32_t> r(execution_tier);
+ int32_t* memory =
+ r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+ // Load memory, store it, then reload it and extract the first lane. Use a
+ // non-zero offset into the memory of 1 lane (4 bytes) to test indexing.
+ BUILD(r,
+ WASM_SIMD_STORE_MEM(WASM_I32V(8), WASM_SIMD_LOAD_MEM(WASM_I32V(4))),
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_LOAD_MEM(WASM_I32V(8))));
- FOR_INT32_INPUTS(i) {
- int32_t expected = i;
- r.builder().WriteMemory(&memory[1], expected);
- CHECK_EQ(expected, r.Call());
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = i;
+ r.builder().WriteMemory(&memory[1], expected);
+ CHECK_EQ(expected, r.Call());
+ }
}
{
@@ -2828,25 +2981,28 @@ WASM_SIMD_TEST(SimdLoadStoreLoad) {
}
WASM_SIMD_TEST(SimdLoadStoreLoadMemargOffset) {
- WasmRunner<int32_t> r(execution_tier);
- int32_t* memory =
- r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
- constexpr byte offset_1 = 4;
- constexpr byte offset_2 = 8;
- // Load from memory at offset_1, store to offset_2, load from offset_2, and
- // extract first lane. We use non-zero memarg offsets to test offset decoding.
- BUILD(
- r,
- WASM_SIMD_STORE_MEM_OFFSET(
- offset_2, WASM_ZERO, WASM_SIMD_LOAD_MEM_OFFSET(offset_1, WASM_ZERO)),
- WASM_SIMD_I32x4_EXTRACT_LANE(
- 0, WASM_SIMD_LOAD_MEM_OFFSET(offset_2, WASM_ZERO)));
-
- FOR_INT32_INPUTS(i) {
- int32_t expected = i;
- // Index 1 of memory (int32_t) will be bytes 4 to 8.
- r.builder().WriteMemory(&memory[1], expected);
- CHECK_EQ(expected, r.Call());
+ {
+ WasmRunner<int32_t> r(execution_tier);
+ int32_t* memory =
+ r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
+ constexpr byte offset_1 = 4;
+ constexpr byte offset_2 = 8;
+ // Load from memory at offset_1, store to offset_2, load from offset_2, and
+ // extract first lane. We use non-zero memarg offsets to test offset
+ // decoding.
+ BUILD(r,
+ WASM_SIMD_STORE_MEM_OFFSET(
+ offset_2, WASM_ZERO,
+ WASM_SIMD_LOAD_MEM_OFFSET(offset_1, WASM_ZERO)),
+ WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_LOAD_MEM_OFFSET(offset_2, WASM_ZERO)));
+
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = i;
+ // Index 1 of memory (int32_t) will be bytes 4 to 8.
+ r.builder().WriteMemory(&memory[1], expected);
+ CHECK_EQ(expected, r.Call());
+ }
}
{
@@ -2903,18 +3059,20 @@ template <typename T>
void RunLoadSplatTest(TestExecutionTier execution_tier, WasmOpcode op) {
constexpr int lanes = 16 / sizeof(T);
constexpr int mem_index = 16; // Load from mem index 16 (bytes).
- WasmRunner<int32_t> r(execution_tier);
- T* memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
- T* global = r.builder().AddGlobal<T>(kWasmS128);
- BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
- WASM_ONE);
+ {
+ WasmRunner<int32_t> r(execution_tier);
+ T* memory = r.builder().AddMemoryElems<T>(kWasmPageSize / sizeof(T));
+ T* global = r.builder().AddGlobal<T>(kWasmS128);
+ BUILD(r, WASM_GLOBAL_SET(0, WASM_SIMD_LOAD_OP(op, WASM_I32V(mem_index))),
+ WASM_ONE);
- for (T x : compiler::ValueHelper::GetVector<T>()) {
- // 16-th byte in memory is lanes-th element (size T) of memory.
- r.builder().WriteMemory(&memory[lanes], x);
- r.Call();
- for (int i = 0; i < lanes; i++) {
- CHECK_EQ(x, LANE(global, i));
+ for (T x : compiler::ValueHelper::GetVector<T>()) {
+ // 16-th byte in memory is lanes-th element (size T) of memory.
+ r.builder().WriteMemory(&memory[lanes], x);
+ r.Call();
+ for (int i = 0; i < lanes; i++) {
+ CHECK_EQ(x, LANE(global, i));
+ }
}
}
@@ -3557,6 +3715,30 @@ WASM_SIMD_TEST(AddExtAddPairwiseI32LeftUnsigned) {
kExprI32x4ExtAddPairwiseI16x8U, {1, 2, 3, 4, 5, 6, 7, 8}, {4, 9, 14, 19});
}
+// Regression test from https://crbug.com/v8/12237 to exercise a codegen bug
+// for i64x2.gt_s, which overwrote one of the inputs.
+WASM_SIMD_TEST(Regress_12237) {
+ WasmRunner<int32_t, int64_t> r(execution_tier);
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ byte value = 0;
+ byte temp = r.AllocateLocal(kWasmS128);
+ int64_t local = 123;
+ BUILD(r,
+ WASM_LOCAL_SET(temp,
+ WASM_SIMD_OPN(kExprI64x2Splat, WASM_LOCAL_GET(value))),
+ WASM_GLOBAL_SET(
+ 0,
+ WASM_SIMD_BINOP(kExprI64x2GtS, WASM_LOCAL_GET(temp),
+ WASM_SIMD_BINOP(kExprI64x2Sub, WASM_LOCAL_GET(temp),
+ WASM_LOCAL_GET(temp)))),
+ WASM_ONE);
+ r.Call(local);
+ int64_t expected = Greater(local, local - local);
+ for (size_t i = 0; i < kSimd128Size / sizeof(int64_t); i++) {
+    CHECK_EQ(expected, LANE(g, i));
+ }
+}
+
#define WASM_EXTRACT_I16x8_TEST(Sign, Type) \
WASM_SIMD_TEST(I16X8ExtractLane##Sign) { \
WasmRunner<int32_t, int32_t> r(execution_tier); \
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index e0ef82d8d7..9e9a6d8c30 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -180,6 +180,65 @@ static void TestInt32Binop(TestExecutionTier execution_tier, WasmOpcode opcode,
}
}
}
+ FOR_INT32_INPUTS(i) {
+ WasmRunner<ctype, ctype> r(execution_tier);
+ // Apply {opcode} on constant and parameter.
+ BUILD(r, WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0)));
+ FOR_INT32_INPUTS(j) {
+ CHECK_EQ(expected(i, j), r.Call(j));
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ WasmRunner<ctype, ctype> r(execution_tier);
+ // Apply {opcode} on parameter and constant.
+ BUILD(r, WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j)));
+ FOR_INT32_INPUTS(i) {
+ CHECK_EQ(expected(i, j), r.Call(i));
+ }
+ }
+ auto to_bool = [](ctype value) -> ctype {
+ return value == static_cast<ctype>(0xDEADBEEF) ? value : !!value;
+ };
+ FOR_INT32_INPUTS(i) {
+ WasmRunner<ctype, ctype> r(execution_tier);
+ // Apply {opcode} on constant and parameter, followed by {if}.
+ BUILD(r, WASM_IF(WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0)),
+ WASM_RETURN(WASM_ONE)),
+ WASM_ZERO);
+ FOR_INT32_INPUTS(j) {
+ CHECK_EQ(to_bool(expected(i, j)), r.Call(j));
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ WasmRunner<ctype, ctype> r(execution_tier);
+ // Apply {opcode} on parameter and constant, followed by {if}.
+ BUILD(r, WASM_IF(WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j)),
+ WASM_RETURN(WASM_ONE)),
+ WASM_ZERO);
+ FOR_INT32_INPUTS(i) {
+ CHECK_EQ(to_bool(expected(i, j)), r.Call(i));
+ }
+ }
+ FOR_INT32_INPUTS(i) {
+ WasmRunner<ctype, ctype> r(execution_tier);
+ // Apply {opcode} on constant and parameter, followed by {br_if}.
+ BUILD(r, WASM_BR_IFD(0, WASM_ONE,
+ WASM_BINOP(opcode, WASM_I32V(i), WASM_LOCAL_GET(0))),
+ WASM_ZERO);
+ FOR_INT32_INPUTS(j) {
+ CHECK_EQ(to_bool(expected(i, j)), r.Call(j));
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ WasmRunner<ctype, ctype> r(execution_tier);
+ // Apply {opcode} on parameter and constant, followed by {br_if}.
+ BUILD(r, WASM_BR_IFD(0, WASM_ONE,
+ WASM_BINOP(opcode, WASM_LOCAL_GET(0), WASM_I32V(j))),
+ WASM_ZERO);
+ FOR_INT32_INPUTS(i) {
+ CHECK_EQ(to_bool(expected(i, j)), r.Call(i));
+ }
+ }
}
// clang-format on
@@ -861,7 +920,7 @@ WASM_EXEC_TEST(Br_height) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(
WASM_BLOCK(WASM_BRV_IFD(0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)),
- WASM_RETURN1(WASM_I32V_1(9))),
+ WASM_RETURN(WASM_I32V_1(9))),
WASM_BRV(0, WASM_I32V_1(8))));
for (int32_t i = 0; i < 5; i++) {
@@ -1414,7 +1473,7 @@ WASM_EXEC_TEST(ExprIf_P) {
WASM_EXEC_TEST(CountDown) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r,
- WASM_LOOP(WASM_IFB(
+ WASM_LOOP(WASM_IF(
WASM_LOCAL_GET(0),
WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_I32V_1(1))),
WASM_BR(1))),
@@ -1589,10 +1648,10 @@ WASM_EXEC_TEST(LoadMem_offset_oob) {
r.builder().AddMemoryElems<byte>(num_bytes);
r.builder().RandomizeMemory(1116 + static_cast<int>(m));
- constexpr byte offset = 8;
- uint32_t boundary = num_bytes - offset - machineTypes[m].MemSize();
+ constexpr byte kOffset = 8;
+ uint32_t boundary = num_bytes - kOffset - machineTypes[m].MemSize();
- BUILD(r, WASM_LOAD_MEM_OFFSET(machineTypes[m], offset, WASM_LOCAL_GET(0)),
+ BUILD(r, WASM_LOAD_MEM_OFFSET(machineTypes[m], kOffset, WASM_LOCAL_GET(0)),
WASM_DROP, WASM_ZERO);
CHECK_EQ(0, r.Call(boundary)); // in bounds.
@@ -2684,12 +2743,6 @@ UNINITIALIZED_WASM_EXEC_TEST(ReturnCall_Bounce_Sum) {
}
}
-#define ADD_CODE(vec, ...) \
- do { \
- byte __buf[] = {__VA_ARGS__}; \
- for (size_t i = 0; i < sizeof(__buf); ++i) vec.push_back(__buf[i]); \
- } while (false)
-
static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
const int kExpected = 6333;
const int kElemSize = 8;
@@ -2719,8 +2772,8 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
for (int i = 0; i < num_params; ++i) {
b.AddParam(ValueType::For(memtypes[i]));
}
- WasmFunctionCompiler& t = r.NewFunction(b.Build());
- BUILD(t, WASM_LOCAL_GET(which));
+ WasmFunctionCompiler& f = r.NewFunction(b.Build());
+ BUILD(f, WASM_LOCAL_GET(which));
// =========================================================================
// Build the calling function.
@@ -2734,7 +2787,7 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
}
// Call the selector function.
- ADD_CODE(code, WASM_CALL_FUNCTION0(t.function_index()));
+ ADD_CODE(code, WASM_CALL_FUNCTION0(f.function_index()));
// Store the result in a local.
byte local_index = r.AllocateLocal(ValueType::For(result));
@@ -2758,8 +2811,8 @@ static void Run_WasmMixedCall_N(TestExecutionTier execution_tier, int start) {
for (int i = 0; i < size; ++i) {
int base = (which + 1) * kElemSize;
byte expected = r.builder().raw_mem_at<byte>(base + i);
- byte result = r.builder().raw_mem_at<byte>(i);
- CHECK_EQ(expected, result);
+ byte actual = r.builder().raw_mem_at<byte>(i);
+ CHECK_EQ(expected, actual);
}
}
}
@@ -3522,7 +3575,7 @@ WASM_EXEC_TEST(InvalidStackAfterBr) {
WASM_EXEC_TEST(InvalidStackAfterReturn) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_RETURN1(WASM_I32V_1(17)), kExprI32Add);
+ BUILD(r, WASM_RETURN(WASM_I32V_1(17)), kExprI32Add);
CHECK_EQ(17, r.Call());
}
@@ -3580,15 +3633,14 @@ WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop2) {
WASM_EXEC_TEST(BlockInsideUnreachable) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(r, WASM_RETURN1(WASM_I32V_1(17)), WASM_BLOCK(WASM_BR(0)));
+ BUILD(r, WASM_RETURN(WASM_I32V_1(17)), WASM_BLOCK(WASM_BR(0)));
CHECK_EQ(17, r.Call());
}
WASM_EXEC_TEST(IfInsideUnreachable) {
WasmRunner<int32_t> r(execution_tier);
- BUILD(
- r, WASM_RETURN1(WASM_I32V_1(17)),
- WASM_IF_ELSE_I(WASM_ONE, WASM_BRV(0, WASM_ONE), WASM_RETURN1(WASM_ONE)));
+ BUILD(r, WASM_RETURN(WASM_I32V_1(17)),
+ WASM_IF_ELSE_I(WASM_ONE, WASM_BRV(0, WASM_ONE), WASM_RETURN(WASM_ONE)));
CHECK_EQ(17, r.Call());
}
@@ -3899,7 +3951,7 @@ TEST(Liftoff_tier_up) {
{
CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<WasmCode> new_code = native_module->AddCode(
- add.function_index(), desc, 0, 0, {}, {}, WasmCode::kFunction,
+ add.function_index(), desc, 0, 0, {}, {}, WasmCode::kWasmFunction,
ExecutionTier::kTurbofan, kNoDebugging);
native_module->PublishCode(std::move(new_code));
}
@@ -3932,7 +3984,6 @@ TEST(Regression_1185323_1185492) {
#undef B2
#undef RET
#undef RET_I8
-#undef ADD_CODE
} // namespace test_run_wasm
} // namespace wasm
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index f5ef3d9b76..786d32a649 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -20,6 +20,7 @@
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/common/wasm/wasm-module-runner.h"
namespace v8 {
namespace internal {
@@ -1108,6 +1109,105 @@ STREAM_TEST(TestModuleWithImportedFunction) {
CHECK(tester.IsPromiseFulfilled());
}
+STREAM_TEST(TestIncrementalCaching) {
+ FLAG_VALUE_SCOPE(wasm_dynamic_tiering, true);
+ FLAG_VALUE_SCOPE(wasm_tier_up, false);
+ constexpr int threshold = 10;
+  FlagScope<int> caching_threshold(&FLAG_wasm_caching_threshold, threshold);
+ StreamTester tester(isolate);
+ int call_cache_counter = 0;
+ tester.stream()->SetModuleCompiledCallback(
+ [&call_cache_counter](
+ const std::shared_ptr<i::wasm::NativeModule>& native_module) {
+ call_cache_counter++;
+ });
+
+ ZoneBuffer buffer(tester.zone());
+ TestSignatures sigs;
+ WasmModuleBuilder builder(tester.zone());
+ builder.SetMinMemorySize(1);
+
+ base::Vector<const char> function_names[] = {
+ base::CStrVector("f0"), base::CStrVector("f1"), base::CStrVector("f2")};
+ for (int i = 0; i < 3; ++i) {
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.v_v());
+
+ constexpr int64_t val = 0x123456789abc;
+ constexpr int index = 0x1234;
+ uint8_t store_mem[] = {
+ WASM_STORE_MEM(MachineType::Int64(), WASM_I32V(index), WASM_I64V(val))};
+ constexpr uint32_t kStoreLength = 20;
+ CHECK_EQ(kStoreLength, arraysize(store_mem));
+
+    // Repeat the store {threshold} times to reach the caching threshold.
+ constexpr uint32_t kCodeLength = kStoreLength * threshold + 1;
+ uint8_t code[kCodeLength];
+ for (int j = 0; j < threshold; ++j) {
+ memcpy(code + (j * kStoreLength), store_mem, kStoreLength);
+ }
+ code[kCodeLength - 1] = WasmOpcode::kExprEnd;
+ f->EmitCode(code, kCodeLength);
+ builder.AddExport(function_names[i], f);
+ }
+ builder.WriteTo(&buffer);
+ tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
+ tester.FinishStream();
+ tester.RunCompilerTasks();
+ CHECK(tester.IsPromiseFulfilled());
+ tester.native_module();
+ constexpr base::Vector<const char> kNoSourceUrl{"", 0};
+ Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Handle<Script> script = GetWasmEngine()->GetOrCreateScript(
+ i_isolate, tester.native_module(), kNoSourceUrl);
+ Handle<FixedArray> export_wrappers = i_isolate->factory()->NewFixedArray(3);
+ Handle<WasmModuleObject> module_object = WasmModuleObject::New(
+ i_isolate, tester.native_module(), script, export_wrappers);
+ ErrorThrower thrower(i_isolate, "Instantiation");
+ // We instantiated before, so the second instantiation must also succeed:
+ Handle<WasmInstanceObject> instance =
+ GetWasmEngine()
+ ->SyncInstantiate(i_isolate, &thrower, module_object, {}, {})
+ .ToHandleChecked();
+ CHECK(!thrower.error());
+
+ WasmCodeRefScope code_scope;
+ CHECK(tester.native_module()->GetCode(0)->is_liftoff());
+ CHECK(tester.native_module()->GetCode(1)->is_liftoff());
+ CHECK(tester.native_module()->GetCode(2)->is_liftoff());
+ // No TurboFan compilation happened yet, and therefore no call to the cache.
+ CHECK_EQ(0, call_cache_counter);
+ bool exception = false;
+ // The tier-up threshold is hard-coded right now.
+ constexpr int tier_up_threshold = 4;
+ for (int i = 0; i < tier_up_threshold; ++i) {
+ testing::CallWasmFunctionForTesting(i_isolate, instance, "f0", 0, nullptr,
+ &exception);
+ }
+ tester.RunCompilerTasks();
+ CHECK(!tester.native_module()->GetCode(0)->is_liftoff());
+ CHECK(tester.native_module()->GetCode(1)->is_liftoff());
+ CHECK(tester.native_module()->GetCode(2)->is_liftoff());
+ CHECK_EQ(1, call_cache_counter);
+ size_t serialized_size;
+ {
+ i::wasm::WasmSerializer serializer(tester.native_module().get());
+ serialized_size = serializer.GetSerializedNativeModuleSize();
+ }
+ for (int i = 0; i < tier_up_threshold; ++i) {
+ testing::CallWasmFunctionForTesting(i_isolate, instance, "f1", 0, nullptr,
+ &exception);
+ }
+ tester.RunCompilerTasks();
+ CHECK(!tester.native_module()->GetCode(0)->is_liftoff());
+ CHECK(!tester.native_module()->GetCode(1)->is_liftoff());
+ CHECK(tester.native_module()->GetCode(2)->is_liftoff());
+ CHECK_EQ(2, call_cache_counter);
+ {
+ i::wasm::WasmSerializer serializer(tester.native_module().get());
+ CHECK_LT(serialized_size, serializer.GetSerializedNativeModuleSize());
+ }
+}
+
STREAM_TEST(TestModuleWithErrorAfterDataSection) {
StreamTester tester(isolate);
@@ -1264,19 +1364,19 @@ STREAM_TEST(TestCompileErrorFunctionName) {
};
const uint8_t bytes_names[] = {
- kUnknownSectionCode, // section code
- U32V_1(11), // section size
- 4, // section name length
- 'n', // section name
- 'a', // section name
- 'm', // section name
- 'e', // section name
- NameSectionKindCode::kFunction, // name section kind
- 4, // name section kind length
- 1, // num function names
- 0, // function index
- 1, // function name length
- 'f', // function name
+ kUnknownSectionCode, // section code
+ U32V_1(11), // section size
+ 4, // section name length
+ 'n', // section name
+ 'a', // section name
+ 'm', // section name
+ 'e', // section name
+ NameSectionKindCode::kFunctionCode, // name section kind
+ 4, // name section kind length
+ 1, // num function names
+ 0, // function index
+ 1, // function name length
+ 'f', // function name
};
for (bool late_names : {false, true}) {
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index afa1040cdd..853817a276 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -328,7 +328,7 @@ WASM_COMPILED_EXEC_TEST(WasmNonBreakablePosition) {
WasmRunner<int> runner(execution_tier);
Isolate* isolate = runner.main_isolate();
- BUILD(runner, WASM_RETURN1(WASM_I32V_2(1024)));
+ BUILD(runner, WASM_RETURN(WASM_I32V_2(1024)));
Handle<JSFunction> main_fun_wrapper =
runner.builder().WrapCode(runner.function_index());
@@ -377,7 +377,7 @@ WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
// functions in the code section matches the function indexes.
// return arg0
- BUILD(runner, WASM_RETURN1(WASM_LOCAL_GET(0)));
+ BUILD(runner, WASM_RETURN(WASM_LOCAL_GET(0)));
// for (int i = 0; i < 10; ++i) { f2(i); }
BUILD(f2, WASM_LOOP(
WASM_BR_IF(0, WASM_BINOP(kExprI32GeU, WASM_LOCAL_GET(0),
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index a5179c04ca..0c9ceeb38b 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -173,8 +173,8 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmUrl) {
// Create a WasmRunner with stack checks and traps enabled.
WasmRunner<int> r(execution_tier, nullptr, "main", kRuntimeExceptionSupport);
- std::vector<byte> code(1, kExprUnreachable);
- r.Build(code.data(), code.data() + code.size());
+ std::vector<byte> trap_code(1, kExprUnreachable);
+ r.Build(trap_code.data(), trap_code.data() + trap_code.size());
WasmFunctionCompiler& f = r.NewFunction<int>("call_main");
BUILD(f, WASM_CALL_FUNCTION0(0));
@@ -235,9 +235,9 @@ WASM_COMPILED_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
WasmRunner<int> r(execution_tier, nullptr, "main",
kRuntimeExceptionSupport);
- std::vector<byte> code(unreachable_pos + 1, kExprNop);
- code[unreachable_pos] = kExprUnreachable;
- r.Build(code.data(), code.data() + code.size());
+ std::vector<byte> trap_code(unreachable_pos + 1, kExprNop);
+ trap_code[unreachable_pos] = kExprUnreachable;
+ r.Build(trap_code.data(), trap_code.data() + trap_code.size());
uint32_t wasm_index_1 = r.function()->func_index;
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 0c44578db4..1bbb2d1ac2 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -220,7 +220,8 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
instance_object(), table_index, table_size);
Handle<WasmTableObject> table_obj =
WasmTableObject::New(isolate_, instance, table.type, table.initial_size,
- table.has_maximum_size, table.maximum_size, nullptr);
+ table.has_maximum_size, table.maximum_size, nullptr,
+ isolate_->factory()->null_value());
WasmTableObject::AddDispatchTable(isolate_, table_obj, instance_object_,
table_index);
@@ -335,7 +336,8 @@ uint32_t TestingModuleBuilder::AddPassiveElementSegment(
CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
return {test_module_.get(), native_module_->bounds_checks(),
- runtime_exception_support_, enabled_features_};
+ runtime_exception_support_, enabled_features_,
+ DynamicTiering::kDisabled};
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
@@ -382,10 +384,9 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
WasmFeatures unused_detected_features;
FunctionBody body(sig, 0, start, end);
std::vector<compiler::WasmLoopInfo> loops;
- DecodeResult result =
- BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
- &unused_detected_features, body, &loops, nullptr, 0,
- kInstrumentEndpoints);
+ DecodeResult result = BuildTFGraph(
+ zone->allocator(), WasmFeatures::All(), nullptr, builder,
+ &unused_detected_features, body, &loops, nullptr, 0, kRegularFunction);
if (result.failed()) {
#ifdef DEBUG
if (!FLAG_trace_wasm_decoder) {
@@ -393,7 +394,7 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
FLAG_trace_wasm_decoder = true;
result = BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr,
builder, &unused_detected_features, body, &loops,
- nullptr, 0, kInstrumentEndpoints);
+ nullptr, 0, kRegularFunction);
}
#endif
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 8f6bb6074f..f5a3ce2389 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -84,10 +84,17 @@ using compiler::Node;
#define WASM_WRAPPER_RETURN_VALUE 8754
-#define BUILD(r, ...) \
- do { \
- byte code[] = {__VA_ARGS__}; \
- r.Build(code, code + arraysize(code)); \
+#define BUILD(r, ...) \
+ do { \
+ byte __code[] = {__VA_ARGS__}; \
+ r.Build(__code, __code + arraysize(__code)); \
+ } while (false)
+
+#define ADD_CODE(vec, ...) \
+ do { \
+ byte __buf[] = {__VA_ARGS__}; \
+ for (size_t __i = 0; __i < sizeof(__buf); __i++) \
+ vec.push_back(__buf[__i]); \
} while (false)
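A hedged usage sketch of the two macros above (runner type and constants are illustrative; a body is only built once, so the two forms are alternatives, not a sequence):

    WasmRunner<int32_t, int32_t> r(execution_tier);

    // One-shot form: return arg0 + 7.
    BUILD(r, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_I32V_1(7)));

    // Incremental form: the same body assembled in a std::vector<byte>.
    std::vector<byte> code;
    ADD_CODE(code, WASM_LOCAL_GET(0));
    ADD_CODE(code, WASM_I32V_1(7), kExprI32Add);
    r.Build(code.data(), code.data() + code.size());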
// For tests that must manually import a JSFunction with source code.
@@ -132,7 +139,7 @@ class TestingModuleBuilder {
byte AddSignature(const FunctionSig* sig) {
DCHECK_EQ(test_module_->types.size(),
test_module_->canonicalized_type_ids.size());
- test_module_->add_signature(sig);
+ test_module_->add_signature(sig, kNoSuperType);
size_t size = test_module_->types.size();
CHECK_GT(127, size);
return static_cast<byte>(size - 1);
diff --git a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
index aa6b755d0d..aed5ccc5ec 100644
--- a/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-simd-utils.cc
@@ -466,8 +466,8 @@ void RunF32x4UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
}
}
- FOR_FLOAT32_NAN_INPUTS(i) {
- float x = bit_cast<float>(nan_test_array[i]);
+ FOR_FLOAT32_NAN_INPUTS(f) {
+ float x = bit_cast<float>(nan_test_array[f]);
if (!PlatformCanRepresent(x)) continue;
// Extreme values have larger errors so skip them for approximation tests.
if (!exact && IsExtreme(x)) continue;
@@ -510,8 +510,8 @@ void RunF32x4BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
}
}
- FOR_FLOAT32_NAN_INPUTS(i) {
- float x = bit_cast<float>(nan_test_array[i]);
+ FOR_FLOAT32_NAN_INPUTS(f) {
+ float x = bit_cast<float>(nan_test_array[f]);
if (!PlatformCanRepresent(x)) continue;
FOR_FLOAT32_NAN_INPUTS(j) {
float y = bit_cast<float>(nan_test_array[j]);
@@ -630,8 +630,8 @@ void RunF64x2UnOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
}
}
- FOR_FLOAT64_NAN_INPUTS(i) {
- double x = bit_cast<double>(double_nan_test_array[i]);
+ FOR_FLOAT64_NAN_INPUTS(d) {
+ double x = bit_cast<double>(double_nan_test_array[d]);
if (!PlatformCanRepresent(x)) continue;
// Extreme values have larger errors so skip them for approximation tests.
if (!exact && IsExtreme(x)) continue;
@@ -674,8 +674,8 @@ void RunF64x2BinOpTest(TestExecutionTier execution_tier, WasmOpcode opcode,
}
}
- FOR_FLOAT64_NAN_INPUTS(i) {
- double x = bit_cast<double>(double_nan_test_array[i]);
+ FOR_FLOAT64_NAN_INPUTS(d) {
+ double x = bit_cast<double>(double_nan_test_array[d]);
if (!PlatformCanRepresent(x)) continue;
FOR_FLOAT64_NAN_INPUTS(j) {
double y = bit_cast<double>(double_nan_test_array[j]);
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index a0beab1a90..13fad94adc 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -12,6 +12,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/handles/global-handles-inl.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects-inl.h"
#include "src/utils/boxed-float.h"
@@ -659,19 +660,20 @@ class SideTable : public ZoneObject {
auto p = map->catch_map.emplace(
offset, ZoneVector<CatchControlTransferEntry>(zone));
auto& catch_entries = p.first->second;
- for (auto& p : catch_targets) {
- auto pcdiff = static_cast<pcdiff_t>(p.pc - ref.from_pc);
+ for (auto& catch_target : catch_targets) {
+ auto pcdiff =
+ static_cast<pcdiff_t>(catch_target.pc - ref.from_pc);
TRACE(
"control transfer @%zu: Δpc %d, stack %u->%u, exn: %d = "
"-%u\n",
offset, pcdiff, ref.stack_height, target_stack_height,
- p.tag_index, spdiff);
+ catch_target.tag_index, spdiff);
CatchControlTransferEntry entry;
entry.pc_diff = pcdiff;
entry.sp_diff = spdiff;
entry.target_arity = arity;
- entry.tag_index = p.tag_index;
- entry.target_control_index = p.target_control_index;
+ entry.tag_index = catch_target.tag_index;
+ entry.target_control_index = catch_target.target_control_index;
catch_entries.emplace_back(entry);
}
}
@@ -927,18 +929,20 @@ class SideTable : public ZoneObject {
// Bind else label for one-armed if.
c->else_label->Bind(i.pc());
} else if (!exception_stack.empty()) {
+ DCHECK_IMPLIES(
+ !unreachable,
+ stack_height >= c->else_label->target_stack_height);
// No catch_all block, prepare for implicit rethrow.
if (exception_stack.back() == control_stack.size() - 1) {
// Close try scope for catch-less try.
exception_stack.pop_back();
+ copy_unreachable();
+ unreachable = control_stack.back().unreachable;
}
DCHECK_EQ(*c->pc, kExprTry);
constexpr int kUnusedControlIndex = -1;
c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
kUnusedControlIndex);
- DCHECK_IMPLIES(
- !unreachable,
- stack_height >= c->else_label->target_stack_height);
stack_height = c->else_label->target_stack_height;
rethrow = !unreachable && !exception_stack.empty();
}
@@ -968,17 +972,20 @@ class SideTable : public ZoneObject {
Control* c = &control_stack.back();
const size_t new_stack_size = control_stack.size() - 1;
const size_t max_depth = new_stack_size - 1;
- size_t target_depth = imm.depth;
- while (target_depth < max_depth &&
- *control_stack[max_depth - target_depth].pc != kExprTry) {
- target_depth++;
+ // Find the first try block that is the target block or encloses it,
+ // i.e. whose index in the control stack is lower than or equal to the
+ // target's.
+ int try_index = static_cast<int>(exception_stack.size()) - 1;
+ while (try_index >= 0 &&
+ exception_stack[try_index] > max_depth - imm.depth) {
+ try_index--;
}
- if (target_depth < max_depth) {
+ if (try_index >= 0) {
+ size_t target_depth = exception_stack[try_index];
constexpr int kUnusedControlIndex = -1;
c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
kUnusedControlIndex);
c->else_label->Finish(&map_, code->start);
- Control* target = &control_stack[max_depth - target_depth];
+ Control* target = &control_stack[target_depth];
DCHECK_EQ(*target->pc, kExprTry);
DCHECK_NOT_NULL(target->else_label);
if (!control_parent().unreachable) {
@@ -1998,7 +2005,6 @@ class WasmInterpreterInternals {
#else
constexpr bool kBigEndian = false;
#endif
- WasmValue result;
switch (opcode) {
#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
case kExpr##name: { \
@@ -2118,20 +2124,20 @@ class WasmInterpreterInternals {
ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
- len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
- Push(result); \
- break; \
+#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
+ case kExpr##name: { \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
+ len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ WasmValue result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
+ Push(result); \
+ break; \
}
ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index 4203c22696..14755e00cf 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -214,10 +214,6 @@
#define WASM_SELECT_A(tval, fval, cond) \
tval, fval, cond, kExprSelectWithType, U32V_1(1), kFuncRefCode
-#define WASM_RETURN0 kExprReturn
-#define WASM_RETURN1(val) val, kExprReturn
-#define WASM_RETURNN(count, ...) __VA_ARGS__, kExprReturn
-
#define WASM_BR(depth) kExprBr, static_cast<byte>(depth)
#define WASM_BR_IF(depth, cond) cond, kExprBrIf, static_cast<byte>(depth)
#define WASM_BR_IFD(depth, val, cond) \
@@ -225,13 +221,11 @@
#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth)
#define WASM_UNREACHABLE kExprUnreachable
#define WASM_RETURN(...) __VA_ARGS__, kExprReturn
+#define WASM_RETURN0 kExprReturn
#define WASM_BR_TABLE(key, count, ...) \
key, kExprBrTable, U32V_1(count), __VA_ARGS__
-#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
-#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
-
#define WASM_THROW(index) kExprThrow, static_cast<byte>(index)
//------------------------------------------------------------------------------
@@ -491,10 +485,14 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
// Heap-allocated object operations.
//------------------------------------------------------------------------------
#define WASM_GC_OP(op) kGCPrefix, static_cast<byte>(op)
+#define WASM_STRUCT_NEW(index, ...) \
+ __VA_ARGS__, WASM_GC_OP(kExprStructNew), static_cast<byte>(index)
#define WASM_STRUCT_NEW_WITH_RTT(index, ...) \
__VA_ARGS__, WASM_GC_OP(kExprStructNewWithRtt), static_cast<byte>(index)
-#define WASM_STRUCT_NEW_DEFAULT(index, rtt) \
- rtt, WASM_GC_OP(kExprStructNewDefault), static_cast<byte>(index)
+#define WASM_STRUCT_NEW_DEFAULT(index) \
+ WASM_GC_OP(kExprStructNewDefault), static_cast<byte>(index)
+#define WASM_STRUCT_NEW_DEFAULT_WITH_RTT(index, rtt) \
+ rtt, WASM_GC_OP(kExprStructNewDefaultWithRtt), static_cast<byte>(index)
#define WASM_STRUCT_GET(typeidx, fieldidx, struct_obj) \
struct_obj, WASM_GC_OP(kExprStructGet), static_cast<byte>(typeidx), \
static_cast<byte>(fieldidx)
@@ -513,13 +511,23 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_REF_AS_NON_NULL(val) val, kExprRefAsNonNull
#define WASM_REF_EQ(lhs, rhs) lhs, rhs, kExprRefEq
#define WASM_REF_TEST(ref, rtt) ref, rtt, WASM_GC_OP(kExprRefTest)
+#define WASM_REF_TEST_STATIC(ref, typeidx) \
+ ref, WASM_GC_OP(kExprRefTestStatic), static_cast<byte>(typeidx)
#define WASM_REF_CAST(ref, rtt) ref, rtt, WASM_GC_OP(kExprRefCast)
+#define WASM_REF_CAST_STATIC(ref, typeidx) \
+ ref, WASM_GC_OP(kExprRefCastStatic), static_cast<byte>(typeidx)
// Takes a reference value from the value stack to allow sequences of
// conditional branches.
#define WASM_BR_ON_CAST(depth, rtt) \
rtt, WASM_GC_OP(kExprBrOnCast), static_cast<byte>(depth)
+#define WASM_BR_ON_CAST_STATIC(depth, typeidx) \
+ WASM_GC_OP(kExprBrOnCastStatic), static_cast<byte>(depth), \
+ static_cast<byte>(typeidx)
#define WASM_BR_ON_CAST_FAIL(depth, rtt) \
rtt, WASM_GC_OP(kExprBrOnCastFail), static_cast<byte>(depth)
+#define WASM_BR_ON_CAST_STATIC_FAIL(depth, typeidx) \
+ WASM_GC_OP(kExprBrOnCastStaticFail), static_cast<byte>(depth), \
+ static_cast<byte>(typeidx)
#define WASM_REF_IS_FUNC(ref) ref, WASM_GC_OP(kExprRefIsFunc)
#define WASM_REF_IS_DATA(ref) ref, WASM_GC_OP(kExprRefIsData)
@@ -539,11 +547,15 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_BR_ON_NON_I31(depth) \
WASM_GC_OP(kExprBrOnNonI31), static_cast<byte>(depth)
+#define WASM_ARRAY_NEW(index, default_value, length) \
+ default_value, length, WASM_GC_OP(kExprArrayNew), static_cast<byte>(index)
#define WASM_ARRAY_NEW_WITH_RTT(index, default_value, length, rtt) \
default_value, length, rtt, WASM_GC_OP(kExprArrayNewWithRtt), \
static_cast<byte>(index)
-#define WASM_ARRAY_NEW_DEFAULT(index, length, rtt) \
- length, rtt, WASM_GC_OP(kExprArrayNewDefault), static_cast<byte>(index)
+#define WASM_ARRAY_NEW_DEFAULT(index, length) \
+ length, WASM_GC_OP(kExprArrayNewDefault), static_cast<byte>(index)
+#define WASM_ARRAY_NEW_DEFAULT_WITH_RTT(index, length, rtt) \
+ length, rtt, WASM_GC_OP(kExprArrayNewDefaultWithRtt), static_cast<byte>(index)
#define WASM_ARRAY_GET(typeidx, array, index) \
array, index, WASM_GC_OP(kExprArrayGet), static_cast<byte>(typeidx)
#define WASM_ARRAY_GET_U(typeidx, array, index) \
@@ -562,6 +574,9 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_ARRAY_INIT(index, length, ...) \
__VA_ARGS__, WASM_GC_OP(kExprArrayInit), static_cast<byte>(index), \
static_cast<byte>(length)
+#define WASM_ARRAY_INIT_STATIC(index, length, ...) \
+ __VA_ARGS__, WASM_GC_OP(kExprArrayInitStatic), static_cast<byte>(index), \
+ static_cast<byte>(length)
#define WASM_RTT_WITH_DEPTH(depth, typeidx) \
kRttWithDepthCode, U32V_1(depth), U32V_1(typeidx)
@@ -855,9 +870,6 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
val, cond, kExprBrIf, static_cast<byte>(depth)
#define WASM_BRV_IFD(depth, val, cond) \
val, cond, kExprBrIf, static_cast<byte>(depth), kExprDrop
-#define WASM_IFB(cond, ...) cond, kExprIf, kVoidCode, __VA_ARGS__, kExprEnd
-#define WASM_BR_TABLEV(val, key, count, ...) \
- val, key, kExprBrTable, U32V_1(count), __VA_ARGS__
//------------------------------------------------------------------------------
// Atomic Operations.
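A hedged usage sketch for the macro changes in this file (type index 0, depths and the rtt expression are illustrative): WASM_RETURN is now the single variadic return macro, and the plain/_STATIC GC macros encode a type index inline instead of consuming an rtt operand from the stack:

    // return (i32.const 1024)  (formerly WASM_RETURN1(...))
    BUILD(r, WASM_RETURN(WASM_I32V_2(1024)));

    WASM_STRUCT_NEW_DEFAULT(0)                   // immediate type index, no rtt
    WASM_STRUCT_NEW_DEFAULT_WITH_RTT(0, rtt)     // rtt-taking variant, renamed
    WASM_REF_TEST_STATIC(WASM_LOCAL_GET(0), 0)   // ref.test against type 0

Here rtt stands for whatever sequence produces the runtime type operand in the rtt-taking variants.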
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index c51ac98108..b29c0d18bb 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -75,7 +75,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
i::Handle<i::JSRegExp> regexp;
{
CHECK(!i_isolate->has_pending_exception());
- v8::TryCatch try_catch(isolate);
+ v8::TryCatch try_catch_inner(isolate);
// Create a string so that we can calculate a hash from the input data.
std::string str = std::string(reinterpret_cast<const char*>(data), size);
i::JSRegExp::Flags flag = static_cast<i::JSRegExp::Flags>(
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index b2052d8c28..31cd0d7add 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -30,6 +30,9 @@ namespace fuzzer {
namespace {
+constexpr int kMaxArrays = 4;
+constexpr int kMaxStructs = 4;
+constexpr int kMaxStructFields = 4;
constexpr int kMaxFunctions = 4;
constexpr int kMaxGlobals = 64;
constexpr int kMaxParameters = 15;
@@ -100,8 +103,8 @@ bool DataRange::get() {
return get<uint8_t>() % 2;
}
-ValueType GetValueType(uint32_t num_types, DataRange* data,
- bool liftoff_as_reference) {
+ValueType GetValueType(DataRange* data, bool liftoff_as_reference,
+ uint32_t num_types) {
constexpr ValueType types[] = {
kWasmI32, kWasmI64,
kWasmF32, kWasmF64,
@@ -111,13 +114,13 @@ ValueType GetValueType(uint32_t num_types, DataRange* data,
constexpr int kLiftoffOnlyTypeCount = 3; // at the end of {types}.
if (liftoff_as_reference) {
- // TODO(11954): Only generate signature types that correspond to functions
uint32_t id = data->get<uint8_t>() % (arraysize(types) + num_types);
if (id >= arraysize(types)) {
return ValueType::Ref(id - arraysize(types), kNullable);
}
return types[id];
}
+
return types[data->get<uint8_t>() %
(arraysize(types) - kLiftoffOnlyTypeCount)];
}
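Read as a mapping, the sampling above works roughly as sketched here (N stands for arraysize(types)):

    // id = data->get<uint8_t>() % (N + num_types)
    //   id in [0, N)             -> one of the hard-coded value types
    //   id in [N, N + num_types) -> ValueType::Ref(id - N, kNullable), i.e. a
    //                               nullable reference to module type (id - N)
    // When liftoff_as_reference is false, the last kLiftoffOnlyTypeCount
    // entries of {types} are excluded and no module-defined types are produced.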
@@ -523,13 +526,14 @@ class WasmGenerator {
}
void drop(DataRange* data) {
- Generate(GetValueType(builder_->builder()->NumTypes(), data,
- liftoff_as_reference_),
+ Generate(GetValueType(data, liftoff_as_reference_,
+ static_cast<uint32_t>(functions_.size()) +
+ num_structs_ + num_arrays_),
data);
builder_->Emit(kExprDrop);
}
- enum CallDirect : bool { kCallDirect = true, kCallIndirect = false };
+ enum CallKind { kCallDirect, kCallIndirect, kCallRef };
template <ValueKind wanted_kind>
void call(DataRange* data) {
@@ -541,6 +545,15 @@ class WasmGenerator {
call(data, ValueType::Primitive(wanted_kind), kCallIndirect);
}
+ template <ValueKind wanted_kind>
+ void call_ref(DataRange* data) {
+ if (liftoff_as_reference_) {
+ call(data, ValueType::Primitive(wanted_kind), kCallRef);
+ } else {
+ Generate<wanted_kind>(data);
+ }
+ }
+
void Convert(ValueType src, ValueType dst) {
auto idx = [](ValueType t) -> int {
switch (t.kind()) {
@@ -569,7 +582,7 @@ class WasmGenerator {
builder_->Emit(kConvertOpcodes[arr_idx]);
}
- void call(DataRange* data, ValueType wanted_kind, CallDirect call_direct) {
+ void call(DataRange* data, ValueType wanted_kind, CallKind call_kind) {
uint8_t random_byte = data->get<uint8_t>();
int func_index = random_byte % functions_.size();
uint32_t sig_index = functions_[func_index];
@@ -586,27 +599,33 @@ class WasmGenerator {
std::equal(sig->returns().begin(), sig->returns().end(),
builder_->signature()->returns().begin(),
builder_->signature()->returns().end())) {
- if (call_direct) {
+ if (call_kind == kCallDirect) {
builder_->EmitWithU32V(kExprReturnCall, func_index);
- } else {
+ } else if (call_kind == kCallIndirect) {
// This will not trap because table[func_index] always contains function
// func_index.
builder_->EmitI32Const(func_index);
builder_->EmitWithU32V(kExprReturnCallIndirect, sig_index);
// TODO(11954): Use other table indices too.
builder_->EmitByte(0); // Table index.
+ } else {
+ GenerateOptRef(HeapType(sig_index), data);
+ builder_->Emit(kExprReturnCallRef);
}
return;
} else {
- if (call_direct) {
+ if (call_kind == kCallDirect) {
builder_->EmitWithU32V(kExprCallFunction, func_index);
- } else {
+ } else if (call_kind == kCallIndirect) {
// This will not trap because table[func_index] always contains function
// func_index.
builder_->EmitI32Const(func_index);
builder_->EmitWithU32V(kExprCallIndirect, sig_index);
// TODO(11954): Use other table indices too.
builder_->EmitByte(0); // Table index.
+ } else {
+ GenerateOptRef(HeapType(sig_index), data);
+ builder_->Emit(kExprCallRef);
}
}
if (sig->return_count() == 0 && wanted_kind != kWasmVoid) {
@@ -795,7 +814,7 @@ class WasmGenerator {
if (new_default) {
builder_->EmitWithPrefix(kExprRttCanon);
builder_->EmitU32V(index);
- builder_->EmitWithPrefix(kExprStructNewDefault);
+ builder_->EmitWithPrefix(kExprStructNewDefaultWithRtt);
builder_->EmitU32V(index);
} else {
StructType* struct_gen = builder_->builder()->GetStructType(index);
@@ -813,7 +832,7 @@ class WasmGenerator {
Generate(kWasmI32, data);
builder_->EmitWithPrefix(kExprRttCanon);
builder_->EmitU32V(index);
- builder_->EmitWithPrefix(kExprArrayNewDefault);
+ builder_->EmitWithPrefix(kExprArrayNewDefaultWithRtt);
builder_->EmitU32V(index);
} else {
Generate(builder_->builder()->GetArrayType(index)->element_type(),
@@ -831,12 +850,13 @@ class WasmGenerator {
WasmFunctionBuilder* func = builder_->builder()->GetFunction(i);
// TODO(11954): Choose a random function from among those matching the
// signature (consider function subtyping?).
- if (func->sig_index() == index) {
+ if (*(func->signature()) ==
+ *(builder_->builder()->GetSignature(index))) {
builder_->EmitWithU32V(kExprRefFunc, func->func_index());
return;
}
}
- ref_null(type, data);
+ UNREACHABLE();
}
}
@@ -865,9 +885,9 @@ class WasmGenerator {
}
void table_get(HeapType type, DataRange* data) {
ValueType needed_type = ValueType::Ref(type, kNullable);
- int table_size = builder_->builder()->NumTables();
+ int table_count = builder_->builder()->NumTables();
ZoneVector<uint32_t> table(builder_->builder()->zone());
- for (int i = 0; i < table_size; i++) {
+ for (int i = 0; i < table_count; i++) {
if (builder_->builder()->GetTableType(i) == needed_type) {
table.push_back(i);
}
@@ -891,9 +911,106 @@ class WasmGenerator {
void table_fill(DataRange* data) {
table_op<kVoid>({kWasmI32, kWasmFuncRef, kWasmI32}, data, kExprTableFill);
}
+ void table_copy(DataRange* data) {
+ ValueType needed_type =
+ data->get<bool>()
+ ? ValueType::Ref(HeapType(HeapType::kFunc), kNullable)
+ : ValueType::Ref(HeapType(HeapType::kExtern), kNullable);
+ int table_count = builder_->builder()->NumTables();
+ ZoneVector<uint32_t> table(builder_->builder()->zone());
+ for (int i = 0; i < table_count; i++) {
+ if (builder_->builder()->GetTableType(i) == needed_type) {
+ table.push_back(i);
+ }
+ }
+ if (table.empty()) {
+ return;
+ }
+ int first_index = data->get<uint8_t>() % static_cast<int>(table.size());
+ int second_index = data->get<uint8_t>() % static_cast<int>(table.size());
+ Generate(kWasmI32, data);
+ Generate(kWasmI32, data);
+ Generate(kWasmI32, data);
+ builder_->EmitWithPrefix(kExprTableCopy);
+ builder_->EmitU32V(table[first_index]);
+ builder_->EmitU32V(table[second_index]);
+ }
+
+ bool array_get_helper(ValueType value_type, DataRange* data) {
+ WasmModuleBuilder* builder = builder_->builder();
+ ZoneVector<uint32_t> array_indices(builder->zone());
+
+ for (uint32_t i = num_structs_; i < num_arrays_ + num_structs_; i++) {
+ DCHECK(builder->IsArrayType(i));
+ if (builder->GetArrayType(i)->element_type() == value_type) {
+ array_indices.push_back(i);
+ }
+ }
+
+ if (!array_indices.empty()) {
+ int index = data->get<uint8_t>() % static_cast<int>(array_indices.size());
+ GenerateOptRef(HeapType(array_indices[index]), data);
+ Generate(kWasmI32, data);
+ builder_->EmitWithPrefix(kExprArrayGet);
+ builder_->EmitU32V(array_indices[index]);
+ return true;
+ }
+
+ return false;
+ }
template <ValueKind wanted_kind>
- void struct_get(DataRange* data) {
+ void array_get(DataRange* data) {
+ bool got_array_value =
+ array_get_helper(ValueType::Primitive(wanted_kind), data);
+ if (!got_array_value) {
+ Generate<wanted_kind>(data);
+ }
+ }
+
+ void array_get_opt_ref(HeapType type, DataRange* data) {
+ ValueType needed_type = ValueType::Ref(type, kNullable);
+ bool got_array_value = array_get_helper(needed_type, data);
+ if (!got_array_value) {
+ ref_null(type, data);
+ }
+ }
+
+ void array_len(DataRange* data) {
+ if (num_arrays_ > 1) {
+ int array_index = (data->get<uint8_t>() % num_arrays_) + num_structs_;
+ DCHECK(builder_->builder()->IsArrayType(array_index));
+ GenerateOptRef(HeapType(array_index), data);
+ builder_->EmitWithPrefix(kExprArrayLen);
+ builder_->EmitU32V(array_index);
+ } else {
+ Generate(kWasmI32, data);
+ }
+ }
+
+ void array_set(DataRange* data) {
+ WasmModuleBuilder* builder = builder_->builder();
+ ZoneVector<uint32_t> array_indices(builder->zone());
+ for (uint32_t i = num_structs_; i < num_arrays_ + num_structs_; i++) {
+ DCHECK(builder->IsArrayType(i));
+ if (builder->GetArrayType(i)->mutability()) {
+ array_indices.push_back(i);
+ }
+ }
+
+ if (array_indices.empty()) {
+ return;
+ }
+
+ int index = data->get<uint8_t>() % static_cast<int>(array_indices.size());
+ GenerateOptRef(HeapType(array_indices[index]), data);
+ Generate(kWasmI32, data);
+ Generate(builder->GetArrayType(array_indices[index])->element_type(), data);
+ builder_->EmitWithPrefix(kExprArraySet);
+ builder_->EmitU32V(array_indices[index]);
+ }
+
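For reference, the operand/opcode order the array_set generator above emits (sketch; T stands for the chosen mutable array type index):

    //   <reference to array of type T>   ; GenerateOptRef(HeapType(T))
    //   <i32 index>                      ; Generate(kWasmI32, ...)
    //   <value of T's element type>      ; Generate(element_type, ...)
    //   array.set T                      ; kExprArraySet + type index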
+ bool struct_get_helper(ValueType value_type, DataRange* data) {
WasmModuleBuilder* builder = builder_->builder();
ZoneVector<uint32_t> field_index(builder->zone());
ZoneVector<uint32_t> struct_index(builder->zone());
@@ -901,34 +1018,59 @@ class WasmGenerator {
DCHECK(builder->IsStructType(i));
int field_count = builder->GetStructType(i)->field_count();
for (int index = 0; index < field_count; index++) {
- if (builder->GetStructType(i)->field(index).kind() == wanted_kind) {
+ if (builder->GetStructType(i)->field(index) == value_type) {
field_index.push_back(index);
struct_index.push_back(i);
}
}
}
- if (field_index.empty()) {
+ if (!field_index.empty()) {
+ int index = data->get<uint8_t>() % static_cast<int>(field_index.size());
+ GenerateOptRef(HeapType(struct_index[index]), data);
+ builder_->EmitWithPrefix(kExprStructGet);
+ builder_->EmitU32V(struct_index[index]);
+ builder_->EmitU32V(field_index[index]);
+ return true;
+ }
+ return false;
+ }
+
+ template <ValueKind wanted_kind>
+ void struct_get(DataRange* data) {
+ bool got_struct_value =
+ struct_get_helper(ValueType::Primitive(wanted_kind), data);
+ if (!got_struct_value) {
Generate<wanted_kind>(data);
- return;
}
- int index = data->get<uint8_t>() % static_cast<int>(field_index.size());
- GenerateOptRef(HeapType(struct_index[index]), data);
- builder_->EmitWithPrefix(kExprStructGet);
- builder_->EmitU32V(struct_index[index]);
- builder_->EmitU32V(field_index[index]);
}
+
+ void struct_get_opt_ref(HeapType type, DataRange* data) {
+ ValueType needed_type = ValueType::Ref(type, kNullable);
+ bool got_struct_value = struct_get_helper(needed_type, data);
+ if (!got_struct_value) {
+ ref_null(type, data);
+ }
+ }
+
void struct_set(DataRange* data) {
WasmModuleBuilder* builder = builder_->builder();
if (num_structs_ > 0) {
int struct_index = data->get<uint8_t>() % num_structs_;
DCHECK(builder->IsStructType(struct_index));
- int field_count = builder->GetStructType(struct_index)->field_count();
- if (field_count == 0) {
+ StructType* struct_type = builder->GetStructType(struct_index);
+ ZoneVector<uint32_t> field_indices(builder->zone());
+ for (uint32_t i = 0; i < struct_type->field_count(); i++) {
+ if (struct_type->mutability(i)) {
+ field_indices.push_back(i);
+ }
+ }
+ if (field_indices.empty()) {
return;
}
- int field_index = data->get<uint8_t>() % field_count;
+ int field_index =
+ field_indices[data->get<uint8_t>() % field_indices.size()];
GenerateOptRef(HeapType(struct_index), data);
- Generate(builder->GetStructType(struct_index)->field(field_index), data);
+ Generate(struct_type->field(field_index), data);
builder_->EmitWithPrefix(kExprStructSet);
builder_->EmitU32V(struct_index);
builder_->EmitU32V(field_index);
@@ -1009,8 +1151,9 @@ class WasmGenerator {
constexpr uint32_t kMaxLocals = 32;
locals_.resize(data->get<uint8_t>() % kMaxLocals);
for (ValueType& local : locals_) {
- local = GetValueType(builder_->builder()->NumTypes(), data,
- liftoff_as_reference_);
+ local = GetValueType(data, liftoff_as_reference_,
+ static_cast<uint32_t>(functions_.size()) +
+ num_structs_ + num_arrays_);
fn->AddLocal(local);
}
}
@@ -1111,6 +1254,7 @@ void WasmGenerator::Generate<kVoid>(DataRange* data) {
&WasmGenerator::call<kVoid>,
&WasmGenerator::call_indirect<kVoid>,
+ &WasmGenerator::call_ref<kVoid>,
&WasmGenerator::set_local,
&WasmGenerator::set_global,
@@ -1118,9 +1262,11 @@ void WasmGenerator::Generate<kVoid>(DataRange* data) {
&WasmGenerator::try_block<kVoid>,
&WasmGenerator::struct_set,
+ &WasmGenerator::array_set,
&WasmGenerator::table_set,
- &WasmGenerator::table_fill};
+ &WasmGenerator::table_fill,
+ &WasmGenerator::table_copy};
GenerateOneOf(alternatives, data);
}
@@ -1269,9 +1415,12 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
&WasmGenerator::call<kI32>,
&WasmGenerator::call_indirect<kI32>,
+ &WasmGenerator::call_ref<kI32>,
&WasmGenerator::try_block<kI32>,
&WasmGenerator::struct_get<kI32>,
+ &WasmGenerator::array_get<kI32>,
+ &WasmGenerator::array_len,
&WasmGenerator::ref_is_null<kI32>,
&WasmGenerator::ref_eq,
@@ -1392,9 +1541,11 @@ void WasmGenerator::Generate<kI64>(DataRange* data) {
&WasmGenerator::call<kI64>,
&WasmGenerator::call_indirect<kI64>,
+ &WasmGenerator::call_ref<kI64>,
&WasmGenerator::try_block<kI64>,
- &WasmGenerator::struct_get<kI64>};
+ &WasmGenerator::struct_get<kI64>,
+ &WasmGenerator::array_get<kI64>};
GenerateOneOf(alternatives, data);
}
@@ -1452,9 +1603,11 @@ void WasmGenerator::Generate<kF32>(DataRange* data) {
&WasmGenerator::call<kF32>,
&WasmGenerator::call_indirect<kF32>,
+ &WasmGenerator::call_ref<kF32>,
&WasmGenerator::try_block<kF32>,
- &WasmGenerator::struct_get<kF32>};
+ &WasmGenerator::struct_get<kF32>,
+ &WasmGenerator::array_get<kF32>};
GenerateOneOf(alternatives, data);
}
@@ -1512,9 +1665,11 @@ void WasmGenerator::Generate<kF64>(DataRange* data) {
&WasmGenerator::call<kF64>,
&WasmGenerator::call_indirect<kF64>,
+ &WasmGenerator::call_ref<kF64>,
&WasmGenerator::try_block<kF64>,
- &WasmGenerator::struct_get<kF64>};
+ &WasmGenerator::struct_get<kF64>,
+ &WasmGenerator::array_get<kF64>};
GenerateOneOf(alternatives, data);
}
@@ -1792,6 +1947,12 @@ void WasmGenerator::Generate(ValueType type, DataRange* data) {
}
void WasmGenerator::GenerateOptRef(HeapType type, DataRange* data) {
+ GeneratorRecursionScope rec_scope(this);
+ if (recursion_limit_reached() || data->size() == 0) {
+ ref_null(type, data);
+ return;
+ }
+
switch (type.representation()) {
// For abstract types, generate one of their subtypes, or fall back to the
// default case.
@@ -1824,11 +1985,9 @@ void WasmGenerator::GenerateOptRef(HeapType type, DataRange* data) {
break;
}
case HeapType::kFunc: {
- uint32_t num_signatures =
- builder_->builder()->NumTypes() - num_structs_ - num_arrays_;
- uint32_t random = data->get<uint32_t>() % (num_signatures + 1);
+ uint32_t random = data->get<uint32_t>() % (functions_.size() + 1);
if (random > 0) {
- uint32_t signature_index = random + num_arrays_ + num_structs_ - 1;
+ uint32_t signature_index = functions_[random - 1];
DCHECK(builder_->builder()->IsSignature(signature_index));
GenerateOptRef(HeapType(signature_index), data);
return;
@@ -1843,14 +2002,17 @@ void WasmGenerator::GenerateOptRef(HeapType type, DataRange* data) {
constexpr GenerateFnWithHeap alternatives_with_index[] = {
&WasmGenerator::new_object, &WasmGenerator::get_local_opt_ref,
+ &WasmGenerator::array_get_opt_ref, &WasmGenerator::struct_get_opt_ref,
&WasmGenerator::ref_null};
constexpr GenerateFnWithHeap alternatives_func_extern[] = {
&WasmGenerator::table_get, &WasmGenerator::get_local_opt_ref,
+ &WasmGenerator::array_get_opt_ref, &WasmGenerator::struct_get_opt_ref,
&WasmGenerator::ref_null};
constexpr GenerateFnWithHeap alternatives_null[] = {
- &WasmGenerator::ref_null, &WasmGenerator::get_local_opt_ref};
+ &WasmGenerator::array_get_opt_ref, &WasmGenerator::ref_null,
+ &WasmGenerator::get_local_opt_ref, &WasmGenerator::struct_get_opt_ref};
if (liftoff_as_reference_ && type.is_index()) {
GenerateOneOf(alternatives_with_index, type, data);
@@ -1865,8 +2027,9 @@ std::vector<ValueType> WasmGenerator::GenerateTypes(DataRange* data) {
std::vector<ValueType> types;
int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
for (int i = 0; i < num_params; ++i) {
- types.push_back(GetValueType(builder_->builder()->NumTypes(), data,
- liftoff_as_reference_));
+ types.push_back(GetValueType(
+ data, liftoff_as_reference_,
+ num_structs_ + num_arrays_ + static_cast<uint32_t>(functions_.size())));
}
return types;
}
@@ -1921,7 +2084,7 @@ void WasmGenerator::ConsumeAndGenerate(
enum SigKind { kFunctionSig, kExceptionSig };
FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind,
- uint32_t num_types, bool liftoff_as_reference) {
+ bool liftoff_as_reference, int num_types) {
// Generate enough parameters to spill some to the stack.
int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
int num_returns = sig_kind == kFunctionSig
@@ -1930,10 +2093,10 @@ FunctionSig* GenerateSig(Zone* zone, DataRange* data, SigKind sig_kind,
FunctionSig::Builder builder(zone, num_returns, num_params);
for (int i = 0; i < num_returns; ++i) {
- builder.AddReturn(GetValueType(num_types, data, liftoff_as_reference));
+ builder.AddReturn(GetValueType(data, liftoff_as_reference, num_types));
}
for (int i = 0; i < num_params; ++i) {
- builder.AddParam(GetValueType(num_types, data, liftoff_as_reference));
+ builder.AddParam(GetValueType(data, liftoff_as_reference, num_types));
}
return builder.Build();
}
@@ -1955,36 +2118,44 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
// these types in function signatures.
// Currently, WasmGenerator assumes this order for struct/array/signature
// definitions.
- uint32_t num_structs = 0, num_arrays = 0;
+
+ uint8_t num_structs = 0;
+ uint8_t num_arrays = 0;
+ static_assert(kMaxFunctions >= 1, "need min. 1 function");
+ uint8_t num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);
+ uint16_t num_types = num_functions;
+
if (liftoff_as_reference) {
- num_structs = 1;
- num_arrays = 4;
- uint32_t count = 4;
- StructType::Builder struct_builder(zone, count);
- struct_builder.AddField(kWasmI32, true);
- struct_builder.AddField(kWasmI64, true);
- struct_builder.AddField(kWasmF32, true);
- struct_builder.AddField(kWasmF64, true);
- StructType* struct_fuz = struct_builder.Build();
- builder.AddStructType(struct_fuz);
- ArrayType* array_fuzI32 = zone->New<ArrayType>(kWasmI32, true);
- ArrayType* array_fuzI64 = zone->New<ArrayType>(kWasmI64, true);
- ArrayType* array_fuzF32 = zone->New<ArrayType>(kWasmF32, true);
- ArrayType* array_fuzF64 = zone->New<ArrayType>(kWasmF64, true);
- builder.AddArrayType(array_fuzI32);
- builder.AddArrayType(array_fuzI64);
- builder.AddArrayType(array_fuzF32);
- builder.AddArrayType(array_fuzF64);
+ num_structs = range.get<uint8_t>() % (kMaxStructs + 1);
+ num_arrays = range.get<uint8_t>() % (kMaxArrays + 1);
+
+ num_types += num_structs + num_arrays;
+
+ for (int struct_index = 0; struct_index < num_structs; struct_index++) {
+ uint8_t num_fields = range.get<uint8_t>() % (kMaxStructFields + 1);
+ StructType::Builder struct_builder(zone, num_fields);
+ for (int field_index = 0; field_index < num_fields; field_index++) {
+ ValueType type = GetValueType(&range, true, num_types);
+ bool mutability = range.get<uint8_t>() < 127;
+ struct_builder.AddField(type, mutability);
+ }
+ StructType* struct_fuz = struct_builder.Build();
+ builder.AddStructType(struct_fuz);
+ }
+
+ for (int array_index = 0; array_index < num_arrays; array_index++) {
+ ValueType type = GetValueType(&range, true, num_types);
+ ArrayType* array_fuz = zone->New<ArrayType>(type, true);
+ builder.AddArrayType(array_fuz);
+ }
}
- function_signatures.push_back(builder.AddSignature(sigs.i_iii()));
- static_assert(kMaxFunctions >= 1, "need min. 1 function");
- int num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);
+ function_signatures.push_back(builder.ForceAddSignature(sigs.i_iii()));
for (int i = 1; i < num_functions; ++i) {
FunctionSig* sig = GenerateSig(zone, &range, kFunctionSig,
- builder.NumTypes(), liftoff_as_reference);
- uint32_t signature_index = builder.AddSignature(sig);
+ liftoff_as_reference, num_types);
+ uint32_t signature_index = builder.ForceAddSignature(sig);
function_signatures.push_back(signature_index);
}
@@ -1997,13 +2168,12 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
int num_exceptions = 1 + (range.get<uint8_t>() % kMaxExceptions);
for (int i = 0; i < num_exceptions; ++i) {
FunctionSig* sig = GenerateSig(zone, &range, kExceptionSig,
- builder.NumTypes(), liftoff_as_reference);
+ liftoff_as_reference, num_types);
builder.AddException(sig);
}
for (int i = 0; i < num_globals; ++i) {
- ValueType type =
- GetValueType(builder.NumTypes(), &range, liftoff_as_reference);
+ ValueType type = GetValueType(&range, liftoff_as_reference, num_types);
// 1/8 of globals are immutable.
const bool mutability = (range.get<uint8_t>() % 8) != 0;
builder.AddGlobal(type, mutability, WasmInitExpr());
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 7165f1994a..ac248a7633 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -122,7 +122,6 @@ void InterpretAndExecuteModule(i::Isolate* isolate,
int32_t result_ref = 0;
int32_t result = 0;
- auto interpreter_result = testing::WasmInterpretationResult::Failed();
if (module_ref.is_null()) {
base::OwnedVector<WasmValue> arguments =
testing::MakeDefaultInterpreterArguments(isolate, main_function->sig());
@@ -183,7 +182,7 @@ void InterpretAndExecuteModule(i::Isolate* isolate,
if (exception_ref != exception) {
const char* exception_text[] = {"no exception", "exception"};
- FATAL("expected: %s; got: %s", exception_text[interpreter_result.trapped()],
+ FATAL("expected: %s; got: %s", exception_text[exception_ref],
exception_text[exception]);
}
@@ -294,7 +293,6 @@ std::ostream& operator<<(std::ostream& os, WasmElemSegment::Entry entry) {
// Appends an initializer expression encoded in {wire_bytes}, in the offset
// contained in {expr}.
-// TODO(7748): Find a way to implement other expressions here.
void AppendInitExpr(std::ostream& os, ModuleWireBytes wire_bytes,
WireBytesRef expr) {
Decoder decoder(wire_bytes.module_bytes());
diff --git a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
index 880a982c7c..b78d5d5bb9 100644
--- a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
@@ -3,8 +3,7 @@ Checks that async chains for for-await-of are correct.
Running test: testBasic
Debugger (test.js:10:2)
Basic (test.js:48:4)
--- async function --
-Basic (test.js:47:17)
+-- await --
(anonymous) (testBasic.js:0:0)
@@ -25,16 +24,14 @@ UncaughtThrow (test.js:67:21)
Running test: testCaughtReject
Debugger (test.js:10:2)
CaughtReject (test.js:76:4)
--- async function --
-CaughtReject (test.js:72:19)
+-- await --
(anonymous) (testCaughtReject.js:0:0)
Running test: testCaughtThrow
Debugger (test.js:10:2)
CaughtThrow (test.js:86:4)
--- async function --
-CaughtThrow (test.js:82:19)
+-- await --
(anonymous) (testCaughtThrow.js:0:0)
@@ -52,7 +49,6 @@ Running test: testCaughtRejectOnBreak
Running test: testCaughtThrowOnBreak
Debugger (test.js:10:2)
CaughtThrowOnBreak (test.js:124:4)
--- async function --
-CaughtThrowOnBreak (test.js:120:19)
+-- await --
(anonymous) (testCaughtThrowOnBreak.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt b/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt
index 28fae0cbfe..4d0f7b3635 100644
--- a/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-function-step-out-expected.txt
@@ -3,54 +3,42 @@ stepOut async function
Running test: testTrivial
Check that we have proper async stack at return
bar (testTrivial.js:28:8)
--- async function --
-bar (testTrivial.js:27:22)
+-- await --
foo (testTrivial.js:23:14)
--- async function --
-foo (testTrivial.js:22:22)
+-- await --
test (testTrivial.js:18:14)
--- async function --
-test (testTrivial.js:17:22)
+-- await --
(anonymous) (:0:0)
foo (testTrivial.js:24:6)
--- async function --
-foo (testTrivial.js:22:22)
+-- await --
test (testTrivial.js:18:14)
--- async function --
-test (testTrivial.js:17:22)
+-- await --
(anonymous) (:0:0)
test (testTrivial.js:19:6)
--- async function --
-test (testTrivial.js:17:22)
+-- await --
(anonymous) (:0:0)
Running test: testStepOutPrecision
Check that stepOut go to resumed outer generator
bar (testStepOutPrecision.js:61:8)
--- async function --
-bar (testStepOutPrecision.js:60:22)
+-- await --
foo (testStepOutPrecision.js:55:14)
--- async function --
-foo (testStepOutPrecision.js:54:22)
+-- await --
test (testStepOutPrecision.js:48:14)
--- async function --
-test (testStepOutPrecision.js:47:14)
+-- await --
(anonymous) (:0:0)
foo (testStepOutPrecision.js:56:8)
--- async function --
-foo (testStepOutPrecision.js:54:22)
+-- await --
test (testStepOutPrecision.js:48:14)
--- async function --
-test (testStepOutPrecision.js:47:14)
+-- await --
(anonymous) (:0:0)
test (testStepOutPrecision.js:49:8)
--- async function --
-test (testStepOutPrecision.js:47:14)
+-- await --
(anonymous) (:0:0)
floodWithTimeouts (testStepOutPrecision.js:40:15)
@@ -66,8 +54,7 @@ test (testStepOutPrecision.js:46:8)
(anonymous) (:0:0)
test (testStepOutPrecision.js:50:8)
--- async function --
-test (testStepOutPrecision.js:47:14)
+-- await --
(anonymous) (:0:0)
floodWithTimeouts (testStepOutPrecision.js:40:15)
@@ -88,43 +75,33 @@ test (testStepOutPrecision.js:46:8)
Running test: testStepIntoAtReturn
Check that stepInto at return go to resumed outer generator
bar (testStepIntoAtReturn.js:93:8)
--- async function --
-bar (testStepIntoAtReturn.js:92:22)
+-- await --
foo (testStepIntoAtReturn.js:88:14)
--- async function --
-foo (testStepIntoAtReturn.js:87:22)
+-- await --
test (testStepIntoAtReturn.js:82:14)
--- async function --
-test (testStepIntoAtReturn.js:81:14)
+-- await --
(anonymous) (:0:0)
bar (testStepIntoAtReturn.js:94:6)
--- async function --
-bar (testStepIntoAtReturn.js:92:22)
+-- await --
foo (testStepIntoAtReturn.js:88:14)
--- async function --
-foo (testStepIntoAtReturn.js:87:22)
+-- await --
test (testStepIntoAtReturn.js:82:14)
--- async function --
-test (testStepIntoAtReturn.js:81:14)
+-- await --
(anonymous) (:0:0)
foo (testStepIntoAtReturn.js:89:6)
--- async function --
-foo (testStepIntoAtReturn.js:87:22)
+-- await --
test (testStepIntoAtReturn.js:82:14)
--- async function --
-test (testStepIntoAtReturn.js:81:14)
+-- await --
(anonymous) (:0:0)
test (testStepIntoAtReturn.js:83:8)
--- async function --
-test (testStepIntoAtReturn.js:81:14)
+-- await --
(anonymous) (:0:0)
test (testStepIntoAtReturn.js:84:6)
--- async function --
-test (testStepIntoAtReturn.js:81:14)
+-- await --
(anonymous) (:0:0)
floodWithTimeouts (testStepIntoAtReturn.js:74:15)
@@ -139,43 +116,33 @@ test (testStepIntoAtReturn.js:80:8)
Running test: testStepOverAtReturn
Check that stepOver at return go to resumed outer generator
bar (testStepIntoAtReturn.js:124:8)
--- async function --
-bar (testStepIntoAtReturn.js:123:22)
+-- await --
foo (testStepIntoAtReturn.js:119:14)
--- async function --
-foo (testStepIntoAtReturn.js:118:22)
+-- await --
test (testStepIntoAtReturn.js:113:14)
--- async function --
-test (testStepIntoAtReturn.js:112:14)
+-- await --
(anonymous) (:0:0)
bar (testStepIntoAtReturn.js:125:6)
--- async function --
-bar (testStepIntoAtReturn.js:123:22)
+-- await --
foo (testStepIntoAtReturn.js:119:14)
--- async function --
-foo (testStepIntoAtReturn.js:118:22)
+-- await --
test (testStepIntoAtReturn.js:113:14)
--- async function --
-test (testStepIntoAtReturn.js:112:14)
+-- await --
(anonymous) (:0:0)
foo (testStepIntoAtReturn.js:120:6)
--- async function --
-foo (testStepIntoAtReturn.js:118:22)
+-- await --
test (testStepIntoAtReturn.js:113:14)
--- async function --
-test (testStepIntoAtReturn.js:112:14)
+-- await --
(anonymous) (:0:0)
test (testStepIntoAtReturn.js:114:8)
--- async function --
-test (testStepIntoAtReturn.js:112:14)
+-- await --
(anonymous) (:0:0)
test (testStepIntoAtReturn.js:115:6)
--- async function --
-test (testStepIntoAtReturn.js:112:14)
+-- await --
(anonymous) (:0:0)
floodWithTimeouts (testStepIntoAtReturn.js:105:15)
@@ -190,19 +157,15 @@ test (testStepIntoAtReturn.js:111:8)
Running test: testStepOutFromNotAwaitedCall
Checks stepOut from not awaited call
bar (testStepIntoAtReturn.js:158:8)
--- async function --
-bar (testStepIntoAtReturn.js:157:22)
+-- await --
foo (testStepIntoAtReturn.js:152:8)
--- async function --
-foo (testStepIntoAtReturn.js:151:22)
+-- await --
test (testStepIntoAtReturn.js:144:14)
--- async function --
-test (testStepIntoAtReturn.js:143:14)
+-- await --
(anonymous) (:0:0)
test (testStepIntoAtReturn.js:145:8)
--- async function --
-test (testStepIntoAtReturn.js:143:14)
+-- await --
(anonymous) (:0:0)
floodWithTimeouts (testStepIntoAtReturn.js:136:15)
diff --git a/deps/v8/test/inspector/debugger/async-stack-await-expected.txt b/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
index 0bb4de5f2c..892a5e5ceb 100644
--- a/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
@@ -1,34 +1,29 @@
Checks that async stacks works for async/await
foo2 (test.js:15:2)
--- async function --
-foo2 (test.js:14:16)
+-- await --
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo2 (test.js:17:2)
--- async function --
-foo2 (test.js:14:16)
+-- await --
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo1 (test.js:9:2)
foo2 (test.js:18:8)
--- async function --
-foo2 (test.js:14:16)
+-- await --
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo1 (test.js:9:2)
-- Promise.then --
foo2 (test.js:19:43)
--- async function --
-foo2 (test.js:14:16)
+-- await --
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo2 (test.js:20:2)
--- async function --
-foo2 (test.js:14:16)
+-- await --
test (test.js:24:8)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
index e61a4e117e..a3b6826878 100644
--- a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
+++ b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
@@ -7,8 +7,7 @@ asyncFact (test.js:9:2)
(anonymous) (expr.js:0:0)
asyncFact (test.js:11:2)
--- async function --
-asyncFact (test.js:3:20)
+-- await --
asyncFact (test.js:3:20)
asyncFact (test.js:3:20)
asyncFact (test.js:3:20)
@@ -22,8 +21,7 @@ asyncFact (test.js:9:2)
(anonymous) (expr.js:0:0)
asyncFact (test.js:11:2)
--- async function --
-asyncFact (test.js:3:20)
+-- await --
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/get-properties-paused-expected.txt b/deps/v8/test/inspector/debugger/get-properties-paused-expected.txt
index 1fea7a5549..8f2f976712 100644
--- a/deps/v8/test/inspector/debugger/get-properties-paused-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-properties-paused-expected.txt
@@ -61,7 +61,7 @@ Running test: testArrayBuffer
Running test: testArrayBufferWithBrokenUintCtor
Internal properties
[[ArrayBufferByteLength]] number 7
- [[ArrayBufferData]] string 0x...
+ [[ArrayBufferData]] number 2
[[Int8Array]] object undefined
[[Prototype]] object undefined
[[Uint8Array]] object undefined
diff --git a/deps/v8/test/inspector/debugger/get-properties-paused.js b/deps/v8/test/inspector/debugger/get-properties-paused.js
index d76429db7f..6b6dfafc9d 100644
--- a/deps/v8/test/inspector/debugger/get-properties-paused.js
+++ b/deps/v8/test/inspector/debugger/get-properties-paused.js
@@ -96,11 +96,7 @@ let { Protocol } = InspectorTest.start('Checks Runtime.getProperties method whil
for (var i = 0; i < internalPropertyArray.length; i++) {
var p = internalPropertyArray[i];
var v = p.value;
- if (p.name === "[[ArrayBufferData]]")
- // Hex value for pointer is non-deterministic
- InspectorTest.log(` ${p.name} ${v.type} ${v.value.substr(0, 2)}...`);
- else
- InspectorTest.log(` ${p.name} ${v.type} ${v.value}`);
+ InspectorTest.log(` ${p.name} ${v.type} ${v.value}`);
}
}
diff --git a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
index 4364308d85..7182a8b7d5 100644
--- a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
+++ b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
@@ -7,6 +7,10 @@ g() returns 2
f() returns 1
g() throws EvalError
+Running test: testAsyncFunctions
+testAsyncFunction("resolve") : ok
+testAsyncFunction("reject") : throws
+
Running test: testDate
someGlobalDate.setDate(10) : throws
new Date().setDate(10) : ok
@@ -20,3 +24,6 @@ someGlobalDate.getFullYear() : ok
new Date().getFullYear() : ok
someGlobalDate.getHours() : ok
new Date().getHours() : ok
+
+Running test: testPromiseReject
+Promise.reject() : throws
diff --git a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
index 4a70fd38a2..bb801e5c1f 100644
--- a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
+++ b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
@@ -14,8 +14,19 @@ function testFunction()
f,g;
debugger;
}
+async function testAsyncFunction(action) {
+ switch (action) {
+ case "resolve": return 1;
+ case "reject": throw new Error();
+ }
+}
//# sourceURL=foo.js`);
+const check = async (expression) => {
+ const {result:{exceptionDetails}} = await Protocol.Runtime.evaluate({expression, throwOnSideEffect: true});
+ InspectorTest.log(expression + ' : ' + (exceptionDetails ? 'throws' : 'ok'));
+};
+
InspectorTest.runAsyncTestSuite([
async function basicTest() {
Protocol.Debugger.enable();
@@ -32,11 +43,12 @@ InspectorTest.runAsyncTestSuite([
InspectorTest.log('g() throws ' + className);
},
+ async function testAsyncFunctions() {
+ await check('testAsyncFunction("resolve")');
+ await check('testAsyncFunction("reject")');
+ },
+
async function testDate() {
- const check = async (expression) => {
- const {result:{exceptionDetails}} = await Protocol.Runtime.evaluate({expression, throwOnSideEffect: true});
- InspectorTest.log(expression + ' : ' + (exceptionDetails ? 'throws' : 'ok'));
- };
// setters are only ok on temporary objects
await check('someGlobalDate.setDate(10)');
await check('new Date().setDate(10)');
@@ -51,5 +63,9 @@ InspectorTest.runAsyncTestSuite([
await check('new Date().getFullYear()');
await check('someGlobalDate.getHours()');
await check('new Date().getHours()');
+ },
+
+ async function testPromiseReject() {
+ await check('Promise.reject()');
}
]);
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index 2fec7679dc..125b4ac700 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -522,4 +522,9 @@
'cpu-profiler/coverage-block': [SKIP],
}], # variant == turboprop or variant = turboprop_as_toptier
+##############################################################################
+['no_i18n == True', {
+ 'runtime/evaluate-without-side-effects-i18n': [SKIP],
+}], # no_i18n == True
+
]
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1253277-expected.txt b/deps/v8/test/inspector/regress/regress-crbug-1253277-expected.txt
new file mode 100644
index 0000000000..225a576d23
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1253277-expected.txt
@@ -0,0 +1,5 @@
+Regression test for crbug.com/1253277
+
+Running test: test
+function foo(){}|_|foo()
+
diff --git a/deps/v8/test/inspector/regress/regress-crbug-1253277.js b/deps/v8/test/inspector/regress/regress-crbug-1253277.js
new file mode 100644
index 0000000000..d98655f1b4
--- /dev/null
+++ b/deps/v8/test/inspector/regress/regress-crbug-1253277.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {contextGroup, Protocol, session} = InspectorTest.start('Regression test for crbug.com/1253277');
+
+const url = 'foo.js';
+contextGroup.addScript('function foo(){}foo()', 0, 0, url);
+session.setupScriptMap();
+
+InspectorTest.runAsyncTestSuite([
+ async function test() {
+ await Promise.all([Protocol.Runtime.enable(), Protocol.Debugger.enable()]);
+ const {result: {breakpointId, locations}} = await Protocol.Debugger.setBreakpointByUrl({
+ columnNumber: 16,
+ lineNumber: 0,
+ url,
+ });
+ await session.logBreakLocations(locations);
+ await Protocol.Debugger.removeBreakpoint({breakpointId});
+ await Promise.all([Protocol.Runtime.disable(), Protocol.Debugger.disable()]);
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n-expected.txt b/deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n-expected.txt
new file mode 100644
index 0000000000..2b03a67496
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n-expected.txt
@@ -0,0 +1,74 @@
+Tests side-effect-free evaluation with i18n enabled
+
+Running test: testCollator
+Intl.Collator.supportedLocalesOf(["en-US"]) : ok
+new Intl.Collator("en-US") : ok
+someGlobalCollator.compare("foo", "bar") : ok
+someGlobalCollator.resolvedOptions() : ok
+
+Running test: testDateTimeFormat
+Intl.DateTimeFormat.supportedLocalesOf(["en-US"]) : ok
+new Intl.DateTimeFormat("en-US") : ok
+someGlobalDateTimeFormat.format(new Date(2021, 5)) : ok
+someGlobalDateTimeFormat.formatToParts(new Date(2021, 5)) : ok
+someGlobalDateTimeFormat.resolvedOptions() : ok
+someGlobalDateTimeFormat.formatRange(new Date(2021, 5), new Date(2022, 1)) : ok
+someGlobalDateTimeFormat.formatRangeToParts(new Date(2021, 5), new Date(2022, 1)) : ok
+
+Running test: testDisplayNames
+Intl.DisplayNames.supportedLocalesOf(["en-US"]) : ok
+new Intl.DisplayNames(["en-US"], {type: "region"}) : ok
+someGlobalDisplayNames.of("en") : ok
+someGlobalDisplayNames.resolvedOptions() : ok
+
+Running test: testIntl
+Intl.getCanonicalLocales("en-US") : ok
+
+Running test: testListFormat
+Intl.ListFormat.supportedLocalesOf(["en-US"]) : ok
+new Intl.ListFormat("en", { style: "long", type: "conjunction" }); : ok
+someGlobalListFormat.format(["a", "b"]) : ok
+someGlobalListFormat.formatToParts(["a", "b"]) : ok
+someGlobalListFormat.resolvedOptions() : ok
+
+Running test: testLocale
+new Intl.Locale("en-US") : ok
+someGlobalLocale.baseName : ok
+someGlobalLocale.calendar : ok
+someGlobalLocale.calendars : ok
+someGlobalLocale.caseFirst : ok
+someGlobalLocale.collation : ok
+someGlobalLocale.hourCycle : ok
+someGlobalLocale.hourCycles : ok
+someGlobalLocale.language : ok
+someGlobalLocale.numberingSystem : ok
+someGlobalLocale.numberingSystems : ok
+someGlobalLocale.numeric : ok
+someGlobalLocale.region : ok
+someGlobalLocale.script : ok
+someGlobalLocale.textInfo : ok
+someGlobalLocale.timeZones : ok
+someGlobalLocale.weekInfo : ok
+someGlobalLocale.maximize() : ok
+someGlobalLocale.minimize() : ok
+someGlobalLocale.toString() : ok
+
+Running test: testNumberFormat
+Intl.NumberFormat.supportedLocalesOf(["en-US"]) : ok
+new Intl.NumberFormat("de-DE", { style: "currency", currency: "EUR" }) : ok
+someGlobalNumberFormat.format(1) : ok
+someGlobalNumberFormat.formatToParts(1) : ok
+someGlobalNumberFormat.resolvedOptions() : ok
+
+Running test: testPluralRules
+Intl.PluralRules.supportedLocalesOf(["en-US"]) : ok
+new Intl.PluralRules("en-US") : ok
+someGlobalPluralRules.resolvedOptions() : ok
+someGlobalPluralRules.select(42) : ok
+
+Running test: testRelativeTimeFormat
+Intl.RelativeTimeFormat.supportedLocalesOf(["en-US"]) : ok
+new Intl.RelativeTimeFormat("en-US", {style: "narrow"}) : ok
+someGlobalRelativeTimeFormat.format(2, "day") : ok
+someGlobalRelativeTimeFormat.formatToParts(2, "day") : ok
+someGlobalRelativeTimeFormat.resolvedOptions() : ok
diff --git a/deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n.js b/deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n.js
new file mode 100644
index 0000000000..79b4f2b66b
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/evaluate-without-side-effects-i18n.js
@@ -0,0 +1,182 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests side-effect-free evaluation with i18n enabled');
+
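+// One global instance per Intl constructor, used by the per-constructor tests
+// below to exercise instance methods and getters.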
+contextGroup.addScript(`
+var someGlobalCollator = new Intl.Collator("en-Latn-US");
+var someGlobalDateTimeFormat = new Intl.DateTimeFormat("en-Latn-US");
+var someGlobalDisplayNames = new Intl.DisplayNames(["en-Latn-US"], {type: 'region'});
+var someGlobalListFormat = new Intl.ListFormat('en', { style: 'long', type: 'conjunction' });
+var someGlobalLocale = new Intl.Locale("en-Latn-US", {language: "es"});
+var someGlobalNumberFormat = new Intl.NumberFormat('de-DE', { style: 'currency', currency: 'EUR' });
+var someGlobalPluralRules = new Intl.PluralRules('en-US');
+var someGlobalRelativeTimeFormat = new Intl.RelativeTimeFormat("en-US", {style: "narrow"});
+`, 0, 0, 'foo.js');
+
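+// Evaluates `expression` with throwOnSideEffect: true and logs "ok" if the
+// side-effect-free evaluation succeeded, or "throws" if it raised an exception.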
+const check = async (expression) => {
+ const {result:{exceptionDetails}} = await Protocol.Runtime.evaluate({expression, throwOnSideEffect: true});
+ InspectorTest.log(expression + ' : ' + (exceptionDetails ? 'throws' : 'ok'));
+};
+
+InspectorTest.runAsyncTestSuite([
+ async function testCollator() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.Collator.supportedLocalesOf(["en-US"])');
+
+ // constructor
+ await check('new Intl.Collator("en-US")');
+
+ // methods
+ await check('someGlobalCollator.compare("foo", "bar")');
+ await check('someGlobalCollator.resolvedOptions()');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testDateTimeFormat() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.DateTimeFormat.supportedLocalesOf(["en-US"])');
+
+ // constructor
+ await check('new Intl.DateTimeFormat("en-US")');
+
+ // methods
+ await check('someGlobalDateTimeFormat.format(new Date(2021, 5))');
+ await check('someGlobalDateTimeFormat.formatToParts(new Date(2021, 5))');
+ await check('someGlobalDateTimeFormat.resolvedOptions()');
+ await check('someGlobalDateTimeFormat.formatRange(new Date(2021, 5), new Date(2022, 1))');
+ await check('someGlobalDateTimeFormat.formatRangeToParts(new Date(2021, 5), new Date(2022, 1))');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testDisplayNames() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.DisplayNames.supportedLocalesOf(["en-US"])');
+
+ // constructor
+ await check('new Intl.DisplayNames(["en-US"], {type: "region"})');
+
+ // methods
+ await check('someGlobalDisplayNames.of("en")');
+ await check('someGlobalDisplayNames.resolvedOptions()');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testIntl() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.getCanonicalLocales("en-US")');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testListFormat() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.ListFormat.supportedLocalesOf(["en-US"])');
+
+ // constructor
+ await check('new Intl.ListFormat("en", { style: "long", type: "conjunction" });')
+
+ // methods
+ await check('someGlobalListFormat.format(["a", "b"])');
+ await check('someGlobalListFormat.formatToParts(["a", "b"])');
+ await check('someGlobalListFormat.resolvedOptions()');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testLocale() {
+ await Protocol.Runtime.enable();
+
+ // constructor
+ await check('new Intl.Locale("en-US")')
+
+ // getters
+ await check('someGlobalLocale.baseName');
+ await check('someGlobalLocale.calendar');
+ await check('someGlobalLocale.calendars');
+ await check('someGlobalLocale.caseFirst');
+ await check('someGlobalLocale.collation');
+ await check('someGlobalLocale.hourCycle');
+ await check('someGlobalLocale.hourCycles');
+ await check('someGlobalLocale.language');
+ await check('someGlobalLocale.numberingSystem');
+ await check('someGlobalLocale.numberingSystems');
+ await check('someGlobalLocale.numeric');
+ await check('someGlobalLocale.region');
+ await check('someGlobalLocale.script');
+ await check('someGlobalLocale.textInfo');
+ await check('someGlobalLocale.timeZones');
+ await check('someGlobalLocale.weekInfo');
+
+ // methods
+ await check('someGlobalLocale.maximize()');
+ await check('someGlobalLocale.minimize()');
+ await check('someGlobalLocale.toString()');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testNumberFormat() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.NumberFormat.supportedLocalesOf(["en-US"])');
+
+ // constructor
+ await check('new Intl.NumberFormat("de-DE", { style: "currency", currency: "EUR" })');
+
+ // methods
+ await check('someGlobalNumberFormat.format(1)');
+ await check('someGlobalNumberFormat.formatToParts(1)');
+ await check('someGlobalNumberFormat.resolvedOptions()');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testPluralRules() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.PluralRules.supportedLocalesOf(["en-US"])');
+
+ // constructor
+ await check('new Intl.PluralRules("en-US")');
+
+ // methods
+ await check('someGlobalPluralRules.resolvedOptions()');
+ await check('someGlobalPluralRules.select(42)');
+
+ await Protocol.Runtime.disable();
+ },
+
+ async function testRelativeTimeFormat() {
+ await Protocol.Runtime.enable();
+
+ // static methods
+ await check('Intl.RelativeTimeFormat.supportedLocalesOf(["en-US"])');
+
+ // constructor
+ await check('new Intl.RelativeTimeFormat("en-US", {style: "narrow"})');
+
+ // methods
+ await check('someGlobalRelativeTimeFormat.format(2, "day")');
+ await check('someGlobalRelativeTimeFormat.formatToParts(2, "day")');
+ await check('someGlobalRelativeTimeFormat.resolvedOptions()');
+
+ await Protocol.Runtime.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index 09d3b8c682..b7a071fb70 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -176,7 +176,7 @@ Running test: testDetachedArrayBuffer
Running test: testArrayBufferWithBrokenUintCtor
Internal properties
[[ArrayBufferByteLength]] number 7
- [[ArrayBufferData]] string 0x...
+ [[ArrayBufferData]] number 4
[[Int8Array]] object undefined
[[Prototype]] object undefined
[[Uint8Array]] object undefined
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index c2ffedfd0a..d536ee57e6 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -155,11 +155,7 @@ async function logGetPropertiesResult(objectId, flags = { ownProperties: true })
for (var i = 0; i < array.length; i++) {
var p = array[i];
var v = p.value;
- if (p.name == "[[ArrayBufferData]]")
- // Hex value for pointer is non-deterministic
- InspectorTest.log(` ${p.name} ${v.type} ${v.value.substr(0, 2)}...`);
- else
- InspectorTest.log(` ${p.name} ${v.type} ${v.value}`);
+ InspectorTest.log(` ${p.name} ${v.type} ${v.value}`);
}
}
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index b0d74da267..0cd2932535 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -95,6 +95,13 @@
}], # 'msan == True'
##############################################################################
+['tsan == True', {
+ # Large allocations
+ 'fail/map-grow-failed': [SKIP],
+ 'fail/set-grow-failed': [SKIP],
+}], # 'tsan == True'
+
+##############################################################################
['simulator_run', {
# Too slow on simulators
'fail/map-grow-failed': [SKIP],
diff --git a/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js b/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
index 2004477932..f8ad1035ae 100644
--- a/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
+++ b/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
@@ -24,7 +24,9 @@ assertEquals(testAdd(6n, 2n), 8n);
assertOptimized(testAdd);
assertThrows(() => testAdd(big, big), RangeError);
-assertUnoptimized(testAdd);
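+// The deopt check below only applies to 64-bit builds.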
+if (%Is64Bit()) {
+ assertUnoptimized(testAdd);
+}
testAdd(30n, -50n);
testAdd(23n, 5n);
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
index 49da8832f0..02b8aa2373 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-4.js
@@ -41,6 +41,7 @@
%OptimizeFunctionForTopTier(foo);
assertEquals(78, foo(26, 6, 46, null));
assertOptimized(foo);
+ %PrepareFunctionForOptimization(foo);
if (i < 3) {
assertFalse(sum_js_got_interpreted);
@@ -60,6 +61,7 @@
assertUnoptimized(foo);
} else {
assertOptimized(foo);
+ %PrepareFunctionForOptimization(foo);
}
}
})();
diff --git a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
index 96e50dd906..1655ba54ca 100644
--- a/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
+++ b/deps/v8/test/mjsunit/compiler/call-with-arraylike-or-spread-7.js
@@ -43,6 +43,7 @@
// The call with spread should have been inlined.
assertFalse(log_got_interpreted);
assertOptimized(foo);
+ %PrepareFunctionForOptimization(foo);
// This invalidates the DependOnArrayIteratorProtector and causes deopt.
Object.defineProperty(Array.prototype, Symbol.iterator, {
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1228407.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1228407.js
new file mode 100644
index 0000000000..f01eafb80e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1228407.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --interrupt-budget=100
+
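+// --interrupt-budget=100 lowers the tier-up budget, so the loops below get
+// optimized after only a few iterations.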
+function foo() {
+ return function bar() {
+ a.p = 42;
+ for (let i = 0; i < 100; i++) this.p();
+ this.p = a;
+ };
+}
+
+var a = foo();
+var b = foo();
+
+a.prototype = { p() {} };
+b.prototype = { p() {
+ this.q = new a();
+ for (let i = 0; i < 200; i++) ;
+}};
+
+new b();
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1234764.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1234764.js
new file mode 100644
index 0000000000..eca9346d17
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1234764.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(arg_true) {
+ let o = {c0: 0};
+ let c0a = arg_true ? 0 : "x";
+ let c0 = Math.max(c0a, 0) + c0a;
+ let v01 = 2**32 + (o.c0 & 1);
+ let ra = ((2**32 - 1) >>> c0) - v01;
+ let rb = (-1) << (32 - c0);
+ return (ra^rb) >> 31;
+}
+
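+// Warm up foo with consistent feedback, then force optimization; the result
+// must stay 0 after tier-up.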
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(true));
+assertEquals(0, foo(true));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo(true));
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1234770.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1234770.js
new file mode 100644
index 0000000000..22f68db902
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1234770.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
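+// For a == 1 the second comparison, (a & 2) == 1, is always false, so foo(1)
+// must return 0 both before and after optimization.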
+function foo(a) {
+ return ((a & 1) == 1) & ((a & 2) == 1);
+}
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo(1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1247763.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1247763.js
new file mode 100644
index 0000000000..760fb92d08
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1247763.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+class C extends Array {};
+%NeverOptimizeFunction(C);
+
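+// Each iteration optimizes store_global (which allocates a C and stores it
+// into the implicit global `global`); later iterations also optimize
+// load_global, which adds properties to that object.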
+for (let i = 0; i < 3; i++) {
+
+ function store_global() { global = new C(); };
+ store_global();
+ %PrepareFunctionForOptimization(store_global);
+ store_global();
+ %OptimizeFunctionOnNextCall(store_global);
+ store_global();
+
+ new C(42);
+
+ function load_global() { global.p1 = {}; global.p2 = {}; }
+ if (i) {
+ load_global();
+ %PrepareFunctionForOptimization(load_global);
+ load_global();
+ %OptimizeFunctionOnNextCall(load_global);
+ load_global();
+ }
+
+}
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
index de92b8d211..9fcaa0b04a 100644
--- a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
@@ -38,6 +38,11 @@ function TestMapConstructorEntrySideEffect(ctor) {
ctor.prototype.set = originalPrototypeSet;
}
+// Forbid inlining these helper functions to avoid deopt surprises.
+%NeverOptimizeFunction(assertEquals);
+%NeverOptimizeFunction(assertFalse);
+%NeverOptimizeFunction(assertTrue);
+
%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
@@ -48,6 +53,7 @@ assertOptimized(TestMapConstructorEntrySideEffect);
// This call would deopt
TestMapConstructorEntrySideEffect(WeakMap);
+
%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(WeakMap);
TestMapConstructorEntrySideEffect(WeakMap);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js b/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js
index 5f361f3a78..c4653cbc8a 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-5929-1.js
@@ -11,4 +11,6 @@ tmp[Symbol.toPrimitive] = function () {
%ArrayBufferDetach(arr.buffer);
return 50;
}
-arr.copyWithin(tmp);
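+// copyWithin on an array whose buffer was detached during argument coercion
+// is expected to throw a TypeError.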
+assertThrows(function() {
+ arr.copyWithin(tmp);
+}, TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/private-brand-checks.js b/deps/v8/test/mjsunit/harmony/private-brand-checks.js
index 8ee8774480..45dd384504 100644
--- a/deps/v8/test/mjsunit/harmony/private-brand-checks.js
+++ b/deps/v8/test/mjsunit/harmony/private-brand-checks.js
@@ -565,3 +565,8 @@ const commonThrowCases = [100, 'foo', undefined, null];
assertFalse(d.exfilEval(c));
assertFalse(d.exfilEval(d));
})();
+
+(function TestBinaryOperatorPrecedenceParseError() {
+ assertThrows(() => eval(`class C { #x; test() { 0 << #x in {} } }`),
+ SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js
deleted file mode 100644
index 1de0a48e3f..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-flag-sequence-generated.js
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-regexp-sequence
-
-// These tests have been generated by the script at
-// https://gist.github.com/mathiasbynens/3b42c99a227521dabfe68d9e63f00f42.
-// Do not modify this file directly!
-
-const re = /\p{Emoji_Flag_Sequence}/u;
-
-assertTrue(re.test('\u{1F1E6}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1FF}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1F6}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1FD}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1E7}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1EF}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F6}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1FB}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1E7}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1F5}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1FB}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1FD}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1E8}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1E9}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1E9}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1E9}\u{1F1EF}'));
-assertTrue(re.test('\u{1F1E9}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1E9}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1E9}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1E9}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1EA}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1EB}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1EB}\u{1F1EF}'));
-assertTrue(re.test('\u{1F1EB}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1EB}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1EB}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1EB}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1E7}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F5}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F6}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1EC}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1ED}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1ED}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1ED}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1ED}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1ED}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1ED}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F6}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1EE}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1EF}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1EF}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1EF}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1EF}\u{1F1F5}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1F5}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1E6}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1E7}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1FB}'));
-assertTrue(re.test('\u{1F1F1}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F5}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F6}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1FB}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1FD}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1F2}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1F5}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1F3}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1F4}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1F5}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1F6}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1F7}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1F7}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1F7}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1F7}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1F7}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1E7}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1EF}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1FB}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1FD}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1F8}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1E9}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1ED}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1EF}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1F1}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1F4}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1F7}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1FB}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1FC}'));
-assertTrue(re.test('\u{1F1F9}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1FA}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1FA}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1FA}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1FA}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1FA}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1FA}\u{1F1FE}'));
-assertTrue(re.test('\u{1F1FA}\u{1F1FF}'));
-assertTrue(re.test('\u{1F1FB}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1FB}\u{1F1E8}'));
-assertTrue(re.test('\u{1F1FB}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1FB}\u{1F1EC}'));
-assertTrue(re.test('\u{1F1FB}\u{1F1EE}'));
-assertTrue(re.test('\u{1F1FB}\u{1F1F3}'));
-assertTrue(re.test('\u{1F1FB}\u{1F1FA}'));
-assertTrue(re.test('\u{1F1FC}\u{1F1EB}'));
-assertTrue(re.test('\u{1F1FC}\u{1F1F8}'));
-assertTrue(re.test('\u{1F1FD}\u{1F1F0}'));
-assertTrue(re.test('\u{1F1FE}\u{1F1EA}'));
-assertTrue(re.test('\u{1F1FE}\u{1F1F9}'));
-assertTrue(re.test('\u{1F1FF}\u{1F1E6}'));
-assertTrue(re.test('\u{1F1FF}\u{1F1F2}'));
-assertTrue(re.test('\u{1F1F0}\u{1F1FE}'));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js
deleted file mode 100644
index 2d72b474d9..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-keycap-sequence-generated.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-regexp-sequence
-
-// These tests have been generated by the script at
-// https://gist.github.com/mathiasbynens/3b42c99a227521dabfe68d9e63f00f42.
-// Do not modify this file directly!
-
-const re = /\p{Emoji_Keycap_Sequence}/u;
-
-assertTrue(re.test('#\uFE0F\u20E3'));
-assertTrue(re.test('9\uFE0F\u20E3'));
-assertTrue(re.test('0\uFE0F\u20E3'));
-assertTrue(re.test('1\uFE0F\u20E3'));
-assertTrue(re.test('2\uFE0F\u20E3'));
-assertTrue(re.test('3\uFE0F\u20E3'));
-assertTrue(re.test('*\uFE0F\u20E3'));
-assertTrue(re.test('5\uFE0F\u20E3'));
-assertTrue(re.test('6\uFE0F\u20E3'));
-assertTrue(re.test('7\uFE0F\u20E3'));
-assertTrue(re.test('8\uFE0F\u20E3'));
-assertTrue(re.test('4\uFE0F\u20E3'));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js
deleted file mode 100644
index b990bf7aaf..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-modifier-sequence-generated.js
+++ /dev/null
@@ -1,541 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-regexp-sequence
-
-// TODO(mathias): Update these tests once a Unicode 12-friendly ICU
-// version rolls into V8.
-
-const re = /\p{Emoji_Modifier_Sequence}/u;
-
-assertTrue(re.test('\u261D\u{1F3FB}'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FF}'));
-assertTrue(re.test('\u261D\u{1F3FD}'));
-assertTrue(re.test('\u261D\u{1F3FE}'));
-assertTrue(re.test('\u261D\u{1F3FF}'));
-assertTrue(re.test('\u26F9\u{1F3FB}'));
-assertTrue(re.test('\u26F9\u{1F3FC}'));
-assertTrue(re.test('\u26F9\u{1F3FD}'));
-assertTrue(re.test('\u26F9\u{1F3FE}'));
-assertTrue(re.test('\u26F9\u{1F3FF}'));
-assertTrue(re.test('\u270A\u{1F3FB}'));
-assertTrue(re.test('\u270A\u{1F3FC}'));
-assertTrue(re.test('\u270A\u{1F3FD}'));
-assertTrue(re.test('\u270A\u{1F3FE}'));
-assertTrue(re.test('\u270A\u{1F3FF}'));
-assertTrue(re.test('\u270B\u{1F3FB}'));
-assertTrue(re.test('\u270B\u{1F3FC}'));
-assertTrue(re.test('\u270B\u{1F3FD}'));
-assertTrue(re.test('\u270B\u{1F3FE}'));
-assertTrue(re.test('\u270B\u{1F3FF}'));
-assertTrue(re.test('\u270C\u{1F3FB}'));
-assertTrue(re.test('\u270C\u{1F3FC}'));
-assertTrue(re.test('\u270C\u{1F3FD}'));
-assertTrue(re.test('\u270C\u{1F3FE}'));
-assertTrue(re.test('\u270C\u{1F3FF}'));
-assertTrue(re.test('\u270D\u{1F3FB}'));
-assertTrue(re.test('\u270D\u{1F3FC}'));
-assertTrue(re.test('\u270D\u{1F3FD}'));
-assertTrue(re.test('\u270D\u{1F3FE}'));
-assertTrue(re.test('\u270D\u{1F3FF}'));
-assertTrue(re.test('\u{1F385}\u{1F3FB}'));
-assertTrue(re.test('\u{1F385}\u{1F3FC}'));
-assertTrue(re.test('\u{1F385}\u{1F3FD}'));
-assertTrue(re.test('\u{1F385}\u{1F3FE}'));
-assertTrue(re.test('\u{1F385}\u{1F3FF}'));
-assertTrue(re.test('\u{1F3C2}\u{1F3FB}'));
-assertTrue(re.test('\u{1F3C2}\u{1F3FC}'));
-assertTrue(re.test('\u{1F3C2}\u{1F3FD}'));
-assertTrue(re.test('\u{1F3C2}\u{1F3FE}'));
-assertTrue(re.test('\u{1F3C2}\u{1F3FF}'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FB}'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FC}'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FD}'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FE}'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FF}'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FB}'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FC}'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FD}'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FE}'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FF}'));
-assertTrue(re.test('\u{1F3C7}\u{1F3FB}'));
-assertTrue(re.test('\u{1F3C7}\u{1F3FC}'));
-assertTrue(re.test('\u{1F3C7}\u{1F3FD}'));
-assertTrue(re.test('\u{1F3C7}\u{1F3FE}'));
-assertTrue(re.test('\u{1F3C7}\u{1F3FF}'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FB}'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FC}'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FD}'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FE}'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FF}'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FB}'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FC}'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FD}'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FE}'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FF}'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FB}'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FC}'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FD}'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FE}'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FF}'));
-assertTrue(re.test('\u{1F442}\u{1F3FB}'));
-assertTrue(re.test('\u{1F442}\u{1F3FC}'));
-assertTrue(re.test('\u{1F442}\u{1F3FD}'));
-assertTrue(re.test('\u{1F442}\u{1F3FE}'));
-assertTrue(re.test('\u{1F442}\u{1F3FF}'));
-assertTrue(re.test('\u{1F443}\u{1F3FB}'));
-assertTrue(re.test('\u{1F443}\u{1F3FC}'));
-assertTrue(re.test('\u{1F443}\u{1F3FD}'));
-assertTrue(re.test('\u{1F443}\u{1F3FE}'));
-assertTrue(re.test('\u{1F443}\u{1F3FF}'));
-assertTrue(re.test('\u{1F446}\u{1F3FB}'));
-assertTrue(re.test('\u{1F446}\u{1F3FC}'));
-assertTrue(re.test('\u{1F446}\u{1F3FD}'));
-assertTrue(re.test('\u{1F446}\u{1F3FE}'));
-assertTrue(re.test('\u{1F446}\u{1F3FF}'));
-assertTrue(re.test('\u{1F447}\u{1F3FB}'));
-assertTrue(re.test('\u{1F447}\u{1F3FC}'));
-assertTrue(re.test('\u{1F447}\u{1F3FD}'));
-assertTrue(re.test('\u{1F447}\u{1F3FE}'));
-assertTrue(re.test('\u{1F447}\u{1F3FF}'));
-assertTrue(re.test('\u{1F448}\u{1F3FB}'));
-assertTrue(re.test('\u{1F448}\u{1F3FC}'));
-assertTrue(re.test('\u{1F448}\u{1F3FD}'));
-assertTrue(re.test('\u{1F448}\u{1F3FE}'));
-assertTrue(re.test('\u{1F448}\u{1F3FF}'));
-assertTrue(re.test('\u{1F449}\u{1F3FB}'));
-assertTrue(re.test('\u{1F449}\u{1F3FC}'));
-assertTrue(re.test('\u{1F449}\u{1F3FD}'));
-assertTrue(re.test('\u{1F449}\u{1F3FE}'));
-assertTrue(re.test('\u{1F449}\u{1F3FF}'));
-assertTrue(re.test('\u{1F44A}\u{1F3FB}'));
-assertTrue(re.test('\u{1F44A}\u{1F3FC}'));
-assertTrue(re.test('\u{1F44A}\u{1F3FD}'));
-assertTrue(re.test('\u{1F44A}\u{1F3FE}'));
-assertTrue(re.test('\u{1F44A}\u{1F3FF}'));
-assertTrue(re.test('\u{1F44B}\u{1F3FB}'));
-assertTrue(re.test('\u{1F44B}\u{1F3FC}'));
-assertTrue(re.test('\u{1F44B}\u{1F3FD}'));
-assertTrue(re.test('\u{1F44B}\u{1F3FE}'));
-assertTrue(re.test('\u{1F44B}\u{1F3FF}'));
-assertTrue(re.test('\u{1F44C}\u{1F3FB}'));
-assertTrue(re.test('\u{1F44C}\u{1F3FC}'));
-assertTrue(re.test('\u{1F44C}\u{1F3FD}'));
-assertTrue(re.test('\u{1F44C}\u{1F3FE}'));
-assertTrue(re.test('\u{1F44C}\u{1F3FF}'));
-assertTrue(re.test('\u{1F44D}\u{1F3FB}'));
-assertTrue(re.test('\u{1F44D}\u{1F3FC}'));
-assertTrue(re.test('\u{1F44D}\u{1F3FD}'));
-assertTrue(re.test('\u{1F44D}\u{1F3FE}'));
-assertTrue(re.test('\u{1F44D}\u{1F3FF}'));
-assertTrue(re.test('\u{1F44E}\u{1F3FB}'));
-assertTrue(re.test('\u{1F44E}\u{1F3FC}'));
-assertTrue(re.test('\u{1F44E}\u{1F3FD}'));
-assertTrue(re.test('\u{1F44E}\u{1F3FE}'));
-assertTrue(re.test('\u{1F44E}\u{1F3FF}'));
-assertTrue(re.test('\u{1F44F}\u{1F3FB}'));
-assertTrue(re.test('\u{1F44F}\u{1F3FC}'));
-assertTrue(re.test('\u{1F44F}\u{1F3FD}'));
-assertTrue(re.test('\u{1F44F}\u{1F3FE}'));
-assertTrue(re.test('\u{1F44F}\u{1F3FF}'));
-assertTrue(re.test('\u{1F450}\u{1F3FB}'));
-assertTrue(re.test('\u{1F450}\u{1F3FC}'));
-assertTrue(re.test('\u{1F450}\u{1F3FD}'));
-assertTrue(re.test('\u{1F450}\u{1F3FE}'));
-assertTrue(re.test('\u{1F450}\u{1F3FF}'));
-assertTrue(re.test('\u{1F466}\u{1F3FB}'));
-assertTrue(re.test('\u{1F466}\u{1F3FC}'));
-assertTrue(re.test('\u{1F466}\u{1F3FD}'));
-assertTrue(re.test('\u{1F466}\u{1F3FE}'));
-assertTrue(re.test('\u{1F466}\u{1F3FF}'));
-assertTrue(re.test('\u{1F467}\u{1F3FB}'));
-assertTrue(re.test('\u{1F467}\u{1F3FC}'));
-assertTrue(re.test('\u{1F467}\u{1F3FD}'));
-assertTrue(re.test('\u{1F467}\u{1F3FE}'));
-assertTrue(re.test('\u{1F467}\u{1F3FF}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}'));
-assertTrue(re.test('\u{1F46E}\u{1F3FB}'));
-assertTrue(re.test('\u{1F46E}\u{1F3FC}'));
-assertTrue(re.test('\u{1F46E}\u{1F3FD}'));
-assertTrue(re.test('\u{1F46E}\u{1F3FE}'));
-assertTrue(re.test('\u{1F46E}\u{1F3FF}'));
-assertTrue(re.test('\u{1F470}\u{1F3FB}'));
-assertTrue(re.test('\u{1F470}\u{1F3FC}'));
-assertTrue(re.test('\u{1F470}\u{1F3FD}'));
-assertTrue(re.test('\u{1F470}\u{1F3FE}'));
-assertTrue(re.test('\u{1F470}\u{1F3FF}'));
-assertTrue(re.test('\u{1F471}\u{1F3FB}'));
-assertTrue(re.test('\u{1F471}\u{1F3FC}'));
-assertTrue(re.test('\u{1F471}\u{1F3FD}'));
-assertTrue(re.test('\u{1F471}\u{1F3FE}'));
-assertTrue(re.test('\u{1F471}\u{1F3FF}'));
-assertTrue(re.test('\u{1F472}\u{1F3FB}'));
-assertTrue(re.test('\u{1F472}\u{1F3FC}'));
-assertTrue(re.test('\u{1F472}\u{1F3FD}'));
-assertTrue(re.test('\u{1F472}\u{1F3FE}'));
-assertTrue(re.test('\u{1F472}\u{1F3FF}'));
-assertTrue(re.test('\u{1F473}\u{1F3FB}'));
-assertTrue(re.test('\u{1F473}\u{1F3FC}'));
-assertTrue(re.test('\u{1F473}\u{1F3FD}'));
-assertTrue(re.test('\u{1F473}\u{1F3FE}'));
-assertTrue(re.test('\u{1F473}\u{1F3FF}'));
-assertTrue(re.test('\u{1F474}\u{1F3FB}'));
-assertTrue(re.test('\u{1F474}\u{1F3FC}'));
-assertTrue(re.test('\u{1F474}\u{1F3FD}'));
-assertTrue(re.test('\u{1F474}\u{1F3FE}'));
-assertTrue(re.test('\u{1F474}\u{1F3FF}'));
-assertTrue(re.test('\u{1F475}\u{1F3FB}'));
-assertTrue(re.test('\u{1F475}\u{1F3FC}'));
-assertTrue(re.test('\u{1F475}\u{1F3FD}'));
-assertTrue(re.test('\u{1F475}\u{1F3FE}'));
-assertTrue(re.test('\u{1F475}\u{1F3FF}'));
-assertTrue(re.test('\u{1F476}\u{1F3FB}'));
-assertTrue(re.test('\u{1F476}\u{1F3FC}'));
-assertTrue(re.test('\u{1F476}\u{1F3FD}'));
-assertTrue(re.test('\u{1F476}\u{1F3FE}'));
-assertTrue(re.test('\u{1F476}\u{1F3FF}'));
-assertTrue(re.test('\u{1F477}\u{1F3FB}'));
-assertTrue(re.test('\u{1F477}\u{1F3FC}'));
-assertTrue(re.test('\u{1F477}\u{1F3FD}'));
-assertTrue(re.test('\u{1F477}\u{1F3FE}'));
-assertTrue(re.test('\u{1F477}\u{1F3FF}'));
-assertTrue(re.test('\u{1F478}\u{1F3FB}'));
-assertTrue(re.test('\u{1F478}\u{1F3FC}'));
-assertTrue(re.test('\u{1F478}\u{1F3FD}'));
-assertTrue(re.test('\u{1F478}\u{1F3FE}'));
-assertTrue(re.test('\u{1F478}\u{1F3FF}'));
-assertTrue(re.test('\u{1F47C}\u{1F3FB}'));
-assertTrue(re.test('\u{1F47C}\u{1F3FC}'));
-assertTrue(re.test('\u{1F47C}\u{1F3FD}'));
-assertTrue(re.test('\u{1F47C}\u{1F3FE}'));
-assertTrue(re.test('\u{1F47C}\u{1F3FF}'));
-assertTrue(re.test('\u{1F481}\u{1F3FB}'));
-assertTrue(re.test('\u{1F481}\u{1F3FC}'));
-assertTrue(re.test('\u{1F481}\u{1F3FD}'));
-assertTrue(re.test('\u{1F481}\u{1F3FE}'));
-assertTrue(re.test('\u{1F481}\u{1F3FF}'));
-assertTrue(re.test('\u{1F482}\u{1F3FB}'));
-assertTrue(re.test('\u{1F482}\u{1F3FC}'));
-assertTrue(re.test('\u{1F482}\u{1F3FD}'));
-assertTrue(re.test('\u{1F482}\u{1F3FE}'));
-assertTrue(re.test('\u{1F482}\u{1F3FF}'));
-assertTrue(re.test('\u{1F483}\u{1F3FB}'));
-assertTrue(re.test('\u{1F483}\u{1F3FC}'));
-assertTrue(re.test('\u{1F483}\u{1F3FD}'));
-assertTrue(re.test('\u{1F483}\u{1F3FE}'));
-assertTrue(re.test('\u{1F483}\u{1F3FF}'));
-assertTrue(re.test('\u{1F485}\u{1F3FB}'));
-assertTrue(re.test('\u{1F485}\u{1F3FC}'));
-assertTrue(re.test('\u{1F485}\u{1F3FD}'));
-assertTrue(re.test('\u{1F485}\u{1F3FE}'));
-assertTrue(re.test('\u{1F485}\u{1F3FF}'));
-assertTrue(re.test('\u{1F486}\u{1F3FB}'));
-assertTrue(re.test('\u{1F486}\u{1F3FC}'));
-assertTrue(re.test('\u{1F486}\u{1F3FD}'));
-assertTrue(re.test('\u{1F486}\u{1F3FE}'));
-assertTrue(re.test('\u{1F486}\u{1F3FF}'));
-assertTrue(re.test('\u{1F487}\u{1F3FB}'));
-assertTrue(re.test('\u{1F487}\u{1F3FC}'));
-assertTrue(re.test('\u{1F487}\u{1F3FD}'));
-assertTrue(re.test('\u{1F487}\u{1F3FE}'));
-assertTrue(re.test('\u{1F487}\u{1F3FF}'));
-assertTrue(re.test('\u{1F4AA}\u{1F3FB}'));
-assertTrue(re.test('\u{1F4AA}\u{1F3FC}'));
-assertTrue(re.test('\u{1F4AA}\u{1F3FD}'));
-assertTrue(re.test('\u{1F4AA}\u{1F3FE}'));
-assertTrue(re.test('\u{1F4AA}\u{1F3FF}'));
-assertTrue(re.test('\u{1F574}\u{1F3FB}'));
-assertTrue(re.test('\u{1F574}\u{1F3FC}'));
-assertTrue(re.test('\u{1F574}\u{1F3FD}'));
-assertTrue(re.test('\u{1F574}\u{1F3FE}'));
-assertTrue(re.test('\u{1F574}\u{1F3FF}'));
-assertTrue(re.test('\u{1F575}\u{1F3FB}'));
-assertTrue(re.test('\u{1F575}\u{1F3FC}'));
-assertTrue(re.test('\u{1F575}\u{1F3FD}'));
-assertTrue(re.test('\u{1F575}\u{1F3FE}'));
-assertTrue(re.test('\u{1F575}\u{1F3FF}'));
-assertTrue(re.test('\u{1F57A}\u{1F3FB}'));
-assertTrue(re.test('\u{1F57A}\u{1F3FC}'));
-assertTrue(re.test('\u{1F57A}\u{1F3FD}'));
-assertTrue(re.test('\u{1F57A}\u{1F3FE}'));
-assertTrue(re.test('\u{1F57A}\u{1F3FF}'));
-assertTrue(re.test('\u{1F590}\u{1F3FB}'));
-assertTrue(re.test('\u{1F590}\u{1F3FC}'));
-assertTrue(re.test('\u{1F590}\u{1F3FD}'));
-assertTrue(re.test('\u{1F590}\u{1F3FE}'));
-assertTrue(re.test('\u{1F590}\u{1F3FF}'));
-assertTrue(re.test('\u261D\u{1F3FC}'));
-assertTrue(re.test('\u{1F595}\u{1F3FC}'));
-assertTrue(re.test('\u{1F595}\u{1F3FD}'));
-assertTrue(re.test('\u{1F595}\u{1F3FE}'));
-assertTrue(re.test('\u{1F595}\u{1F3FF}'));
-assertTrue(re.test('\u{1F596}\u{1F3FB}'));
-assertTrue(re.test('\u{1F596}\u{1F3FC}'));
-assertTrue(re.test('\u{1F596}\u{1F3FD}'));
-assertTrue(re.test('\u{1F596}\u{1F3FE}'));
-assertTrue(re.test('\u{1F596}\u{1F3FF}'));
-assertTrue(re.test('\u{1F645}\u{1F3FB}'));
-assertTrue(re.test('\u{1F645}\u{1F3FC}'));
-assertTrue(re.test('\u{1F645}\u{1F3FD}'));
-assertTrue(re.test('\u{1F645}\u{1F3FE}'));
-assertTrue(re.test('\u{1F645}\u{1F3FF}'));
-assertTrue(re.test('\u{1F646}\u{1F3FB}'));
-assertTrue(re.test('\u{1F646}\u{1F3FC}'));
-assertTrue(re.test('\u{1F646}\u{1F3FD}'));
-assertTrue(re.test('\u{1F646}\u{1F3FE}'));
-assertTrue(re.test('\u{1F646}\u{1F3FF}'));
-assertTrue(re.test('\u{1F647}\u{1F3FB}'));
-assertTrue(re.test('\u{1F647}\u{1F3FC}'));
-assertTrue(re.test('\u{1F647}\u{1F3FD}'));
-assertTrue(re.test('\u{1F647}\u{1F3FE}'));
-assertTrue(re.test('\u{1F647}\u{1F3FF}'));
-assertTrue(re.test('\u{1F64B}\u{1F3FB}'));
-assertTrue(re.test('\u{1F64B}\u{1F3FC}'));
-assertTrue(re.test('\u{1F64B}\u{1F3FD}'));
-assertTrue(re.test('\u{1F64B}\u{1F3FE}'));
-assertTrue(re.test('\u{1F64B}\u{1F3FF}'));
-assertTrue(re.test('\u{1F64C}\u{1F3FB}'));
-assertTrue(re.test('\u{1F64C}\u{1F3FC}'));
-assertTrue(re.test('\u{1F64C}\u{1F3FD}'));
-assertTrue(re.test('\u{1F64C}\u{1F3FE}'));
-assertTrue(re.test('\u{1F64C}\u{1F3FF}'));
-assertTrue(re.test('\u{1F64D}\u{1F3FB}'));
-assertTrue(re.test('\u{1F64D}\u{1F3FC}'));
-assertTrue(re.test('\u{1F64D}\u{1F3FD}'));
-assertTrue(re.test('\u{1F64D}\u{1F3FE}'));
-assertTrue(re.test('\u{1F64D}\u{1F3FF}'));
-assertTrue(re.test('\u{1F64E}\u{1F3FB}'));
-assertTrue(re.test('\u{1F64E}\u{1F3FC}'));
-assertTrue(re.test('\u{1F64E}\u{1F3FD}'));
-assertTrue(re.test('\u{1F64E}\u{1F3FE}'));
-assertTrue(re.test('\u{1F64E}\u{1F3FF}'));
-assertTrue(re.test('\u{1F64F}\u{1F3FB}'));
-assertTrue(re.test('\u{1F64F}\u{1F3FC}'));
-assertTrue(re.test('\u{1F64F}\u{1F3FD}'));
-assertTrue(re.test('\u{1F64F}\u{1F3FE}'));
-assertTrue(re.test('\u{1F64F}\u{1F3FF}'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FB}'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FC}'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FD}'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FE}'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FF}'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FB}'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FC}'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FD}'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FE}'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FF}'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FB}'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FC}'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FD}'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FE}'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FF}'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FB}'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FC}'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FD}'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FE}'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FF}'));
-assertTrue(re.test('\u{1F6C0}\u{1F3FB}'));
-assertTrue(re.test('\u{1F6C0}\u{1F3FC}'));
-assertTrue(re.test('\u{1F6C0}\u{1F3FD}'));
-assertTrue(re.test('\u{1F6C0}\u{1F3FE}'));
-assertTrue(re.test('\u{1F6C0}\u{1F3FF}'));
-assertTrue(re.test('\u{1F6CC}\u{1F3FB}'));
-assertTrue(re.test('\u{1F6CC}\u{1F3FC}'));
-assertTrue(re.test('\u{1F6CC}\u{1F3FD}'));
-assertTrue(re.test('\u{1F6CC}\u{1F3FE}'));
-assertTrue(re.test('\u{1F6CC}\u{1F3FF}'));
-assertTrue(re.test('\u{1F918}\u{1F3FB}'));
-assertTrue(re.test('\u{1F918}\u{1F3FC}'));
-assertTrue(re.test('\u{1F918}\u{1F3FD}'));
-assertTrue(re.test('\u{1F918}\u{1F3FE}'));
-assertTrue(re.test('\u{1F918}\u{1F3FF}'));
-assertTrue(re.test('\u{1F919}\u{1F3FB}'));
-assertTrue(re.test('\u{1F919}\u{1F3FC}'));
-assertTrue(re.test('\u{1F919}\u{1F3FD}'));
-assertTrue(re.test('\u{1F919}\u{1F3FE}'));
-assertTrue(re.test('\u{1F919}\u{1F3FF}'));
-assertTrue(re.test('\u{1F91A}\u{1F3FB}'));
-assertTrue(re.test('\u{1F91A}\u{1F3FC}'));
-assertTrue(re.test('\u{1F91A}\u{1F3FD}'));
-assertTrue(re.test('\u{1F91A}\u{1F3FE}'));
-assertTrue(re.test('\u{1F91A}\u{1F3FF}'));
-assertTrue(re.test('\u{1F91B}\u{1F3FB}'));
-assertTrue(re.test('\u{1F91B}\u{1F3FC}'));
-assertTrue(re.test('\u{1F91B}\u{1F3FD}'));
-assertTrue(re.test('\u{1F91B}\u{1F3FE}'));
-assertTrue(re.test('\u{1F91B}\u{1F3FF}'));
-assertTrue(re.test('\u{1F91C}\u{1F3FB}'));
-assertTrue(re.test('\u{1F91C}\u{1F3FC}'));
-assertTrue(re.test('\u{1F91C}\u{1F3FD}'));
-assertTrue(re.test('\u{1F91C}\u{1F3FE}'));
-assertTrue(re.test('\u{1F91C}\u{1F3FF}'));
-assertTrue(re.test('\u{1F91E}\u{1F3FB}'));
-assertTrue(re.test('\u{1F91E}\u{1F3FC}'));
-assertTrue(re.test('\u{1F91E}\u{1F3FD}'));
-assertTrue(re.test('\u{1F91E}\u{1F3FE}'));
-assertTrue(re.test('\u{1F91E}\u{1F3FF}'));
-assertTrue(re.test('\u{1F91F}\u{1F3FB}'));
-assertTrue(re.test('\u{1F91F}\u{1F3FC}'));
-assertTrue(re.test('\u{1F91F}\u{1F3FD}'));
-assertTrue(re.test('\u{1F91F}\u{1F3FE}'));
-assertTrue(re.test('\u{1F91F}\u{1F3FF}'));
-assertTrue(re.test('\u{1F926}\u{1F3FB}'));
-assertTrue(re.test('\u{1F926}\u{1F3FC}'));
-assertTrue(re.test('\u{1F926}\u{1F3FD}'));
-assertTrue(re.test('\u{1F926}\u{1F3FE}'));
-assertTrue(re.test('\u{1F926}\u{1F3FF}'));
-assertTrue(re.test('\u{1F930}\u{1F3FB}'));
-assertTrue(re.test('\u{1F930}\u{1F3FC}'));
-assertTrue(re.test('\u{1F930}\u{1F3FD}'));
-assertTrue(re.test('\u{1F930}\u{1F3FE}'));
-assertTrue(re.test('\u{1F930}\u{1F3FF}'));
-assertTrue(re.test('\u{1F931}\u{1F3FB}'));
-assertTrue(re.test('\u{1F931}\u{1F3FC}'));
-assertTrue(re.test('\u{1F931}\u{1F3FD}'));
-assertTrue(re.test('\u{1F931}\u{1F3FE}'));
-assertTrue(re.test('\u{1F931}\u{1F3FF}'));
-assertTrue(re.test('\u{1F932}\u{1F3FB}'));
-assertTrue(re.test('\u{1F932}\u{1F3FC}'));
-assertTrue(re.test('\u{1F932}\u{1F3FD}'));
-assertTrue(re.test('\u{1F932}\u{1F3FE}'));
-assertTrue(re.test('\u{1F932}\u{1F3FF}'));
-assertTrue(re.test('\u{1F933}\u{1F3FB}'));
-assertTrue(re.test('\u{1F933}\u{1F3FC}'));
-assertTrue(re.test('\u{1F933}\u{1F3FD}'));
-assertTrue(re.test('\u{1F933}\u{1F3FE}'));
-assertTrue(re.test('\u{1F933}\u{1F3FF}'));
-assertTrue(re.test('\u{1F934}\u{1F3FB}'));
-assertTrue(re.test('\u{1F934}\u{1F3FC}'));
-assertTrue(re.test('\u{1F934}\u{1F3FD}'));
-assertTrue(re.test('\u{1F934}\u{1F3FE}'));
-assertTrue(re.test('\u{1F934}\u{1F3FF}'));
-assertTrue(re.test('\u{1F935}\u{1F3FB}'));
-assertTrue(re.test('\u{1F935}\u{1F3FC}'));
-assertTrue(re.test('\u{1F935}\u{1F3FD}'));
-assertTrue(re.test('\u{1F935}\u{1F3FE}'));
-assertTrue(re.test('\u{1F935}\u{1F3FF}'));
-assertTrue(re.test('\u{1F936}\u{1F3FB}'));
-assertTrue(re.test('\u{1F936}\u{1F3FC}'));
-assertTrue(re.test('\u{1F936}\u{1F3FD}'));
-assertTrue(re.test('\u{1F936}\u{1F3FE}'));
-assertTrue(re.test('\u{1F936}\u{1F3FF}'));
-assertTrue(re.test('\u{1F937}\u{1F3FB}'));
-assertTrue(re.test('\u{1F937}\u{1F3FC}'));
-assertTrue(re.test('\u{1F937}\u{1F3FD}'));
-assertTrue(re.test('\u{1F937}\u{1F3FE}'));
-assertTrue(re.test('\u{1F937}\u{1F3FF}'));
-assertTrue(re.test('\u{1F938}\u{1F3FB}'));
-assertTrue(re.test('\u{1F938}\u{1F3FC}'));
-assertTrue(re.test('\u{1F938}\u{1F3FD}'));
-assertTrue(re.test('\u{1F938}\u{1F3FE}'));
-assertTrue(re.test('\u{1F938}\u{1F3FF}'));
-assertTrue(re.test('\u{1F939}\u{1F3FB}'));
-assertTrue(re.test('\u{1F939}\u{1F3FC}'));
-assertTrue(re.test('\u{1F939}\u{1F3FD}'));
-assertTrue(re.test('\u{1F939}\u{1F3FE}'));
-assertTrue(re.test('\u{1F939}\u{1F3FF}'));
-assertTrue(re.test('\u{1F93D}\u{1F3FB}'));
-assertTrue(re.test('\u{1F93D}\u{1F3FC}'));
-assertTrue(re.test('\u{1F93D}\u{1F3FD}'));
-assertTrue(re.test('\u{1F93D}\u{1F3FE}'));
-assertTrue(re.test('\u{1F93D}\u{1F3FF}'));
-assertTrue(re.test('\u{1F93E}\u{1F3FB}'));
-assertTrue(re.test('\u{1F93E}\u{1F3FC}'));
-assertTrue(re.test('\u{1F93E}\u{1F3FD}'));
-assertTrue(re.test('\u{1F93E}\u{1F3FE}'));
-assertTrue(re.test('\u{1F93E}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9B5}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9B5}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9B5}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9B5}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9B5}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9B6}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9B6}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9B6}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9B6}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9B6}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D2}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D2}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D2}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D2}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D2}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D3}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D3}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D3}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D3}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D3}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D4}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D4}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D4}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D4}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D4}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D5}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D5}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D5}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D5}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D5}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FF}'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FE}'));
-assertTrue(re.test('\u{1F595}\u{1F3FB}'));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js
deleted file mode 100644
index 839d55791f..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-tag-sequence-generated.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-regexp-sequence
-
-// These tests have been generated by the script at
-// https://gist.github.com/mathiasbynens/3b42c99a227521dabfe68d9e63f00f42.
-// Do not modify this file directly!
-
-const re = /\p{Emoji_Tag_Sequence}/u;
-
-assertTrue(re.test('\u{1F3F4}\u{E0067}\u{E0062}\u{E0065}\u{E006E}\u{E0067}\u{E007F}'));
-assertTrue(re.test('\u{1F3F4}\u{E0067}\u{E0062}\u{E0073}\u{E0063}\u{E0074}\u{E007F}'));
-assertTrue(re.test('\u{1F3F4}\u{E0067}\u{E0062}\u{E0077}\u{E006C}\u{E0073}\u{E007F}'));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js b/deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js
deleted file mode 100644
index b04f0e441b..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-property-emoji-zwj-sequence-generated.js
+++ /dev/null
@@ -1,915 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-regexp-sequence
-
-// These tests have been generated by the script at
-// https://gist.github.com/mathiasbynens/3b42c99a227521dabfe68d9e63f00f42.
-// Do not modify this file directly!
-
-const re = /\p{Emoji_ZWJ_Sequence}/u;
-
-assertTrue(re.test('\u{1F468}\u200D\u2764\uFE0F\u200D\u{1F468}'));
-assertTrue(re.test('\u{1F441}\uFE0F\u200D\u{1F5E8}\uFE0F'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F466}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F467}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F467}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F466}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F467}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F468}\u200D\u{1F467}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F466}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FE}'));
-assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F468}'));
-assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F469}'));
-assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F48B}\u200D\u{1F468}'));
-assertTrue(re.test('\u{1F469}\u200D\u2764\uFE0F\u200D\u{1F48B}\u200D\u{1F469}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F466}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F467}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F467}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F466}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F466}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F469}\u200D\u{1F467}\u200D\u{1F467}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FE}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FE}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FE}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F468}\u{1F3FE}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F469}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D1}\u200D\u{1F91D}\u200D\u{1F9D1}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FB}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FC}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FD}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FE}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FB}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FC}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FD}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FE}'));
-assertTrue(re.test('\u{1F9D1}\u{1F3FF}\u200D\u{1F91D}\u200D\u{1F9D1}\u{1F3FF}'));
-assertTrue(re.test('\u{1F468}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F468}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F468}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F469}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F469}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F469}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u2695\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u2696\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u2708\uFE0F'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F33E}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F373}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F393}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3A4}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3A8}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3EB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F3ED}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F4BB}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F4BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F527}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F52C}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F680}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F692}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9AF}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9BC}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9BD}'));
-assertTrue(re.test('\u26F9\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u26F9\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u26F9\uFE0F\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u26F9\uFE0F\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C3}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3C4}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CA}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\uFE0F\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CB}\uFE0F\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\uFE0F\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F3CC}\uFE0F\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F46E}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F46F}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F46F}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F471}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F471}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F471}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F473}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F473}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F473}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F477}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F477}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F477}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F481}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F481}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F481}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F482}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F482}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F482}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F486}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F486}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F486}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F487}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F487}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F487}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F575}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F575}\uFE0F\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F575}\uFE0F\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F645}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F645}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F645}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F646}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F646}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F646}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F647}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F647}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F647}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64B}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64D}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F64E}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6A3}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B4}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B5}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F6B6}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F926}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F926}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F926}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F937}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F937}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F937}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F938}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F938}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F938}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F939}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F939}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F939}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93C}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93C}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93D}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F93E}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B8}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9B9}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CD}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CE}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9CF}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D6}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D7}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D8}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9D9}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DA}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DB}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DC}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FB}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FB}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FC}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FC}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FD}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FD}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DD}\u{1F3FF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DE}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DE}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F9DF}\u200D\u2640\uFE0F'));
-assertTrue(re.test('\u{1F9DF}\u200D\u2642\uFE0F'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F468}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F468}\u{1F3FB}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F468}\u{1F3FC}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F468}\u{1F3FD}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F468}\u{1F3FE}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F468}\u{1F3FF}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F469}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F469}\u{1F3FB}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F469}\u{1F3FC}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F469}\u{1F3FD}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F469}\u{1F3FE}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B0}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B1}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B2}'));
-assertTrue(re.test('\u{1F469}\u{1F3FF}\u200D\u{1F9B3}'));
-assertTrue(re.test('\u{1F3F3}\uFE0F\u200D\u{1F308}'));
-assertTrue(re.test('\u{1F3F4}\u200D\u2620\uFE0F'));
-assertTrue(re.test('\u{1F415}\u200D\u{1F9BA}'));
-assertTrue(re.test('\u{1F468}\u200D\u2764\uFE0F\u200D\u{1F48B}\u200D\u{1F468}'));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-sequence.js b/deps/v8/test/mjsunit/harmony/regexp-property-sequence.js
deleted file mode 100644
index 4d43298016..0000000000
--- a/deps/v8/test/mjsunit/harmony/regexp-property-sequence.js
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-regexp-sequence
-
-// Normal usage.
-assertDoesNotThrow("/\\p{Emoji_Flag_Sequence}/u");
-assertTrue(/\p{Emoji_Flag_Sequence}/u.test("\u{1F1E9}\u{1F1EA}"));
-
-assertDoesNotThrow("/\\p{Emoji_Keycap_Sequence}/u");
-assertTrue(/\p{Emoji_Keycap_Sequence}/u.test("\u0023\uFE0F\u20E3"));
-
-assertDoesNotThrow("/\\p{Emoji_Keycap_Sequence}/u");
-assertFalse(/\p{Emoji_Keycap_Sequence}/u.test("\u0022\uFE0F\u20E3"));
-
-assertDoesNotThrow("/\\p{Emoji_Modifier_Sequence}/u");
-assertTrue(/\p{Emoji_Modifier_Sequence}/u.test("\u26F9\u{1F3FF}"));
-
-assertDoesNotThrow("/\\p{Emoji_ZWJ_Sequence}/u");
-assertTrue(/\p{Emoji_ZWJ_Sequence}/u.test("\u{1F468}\u{200D}\u{1F467}"));
-
-// Without unicode flag.
-assertDoesNotThrow("/\\p{Emoji_Flag_Sequence}/");
-assertFalse(/\p{Emoji_Flag_Sequence}/.test("\u{1F1E9}\u{1F1EA}"));
-assertTrue(/\p{Emoji_Flag_Sequence}/.test("\\p{Emoji_Flag_Sequence}"));
-
-// Negated and/or inside a character class.
-assertThrows("/\\P{Emoji_Flag_Sequence}/u");
-assertThrows("/\\P{Emoji_Keycap_Sequence}/u");
-assertThrows("/\\P{Emoji_Modifier_Sequence}/u");
-assertThrows("/\\P{Emoji_Tag_Sequence}/u");
-assertThrows("/\\P{Emoji_ZWJ_Sequence}/u");
-
-assertThrows("/[\\p{Emoji_Flag_Sequence}]/u");
-assertThrows("/[\\p{Emoji_Keycap_Sequence}]/u");
-assertThrows("/[\\p{Emoji_Modifier_Sequence}]/u");
-assertThrows("/[\\p{Emoji_Tag_Sequence}]/u");
-assertThrows("/[\\p{Emoji_ZWJ_Sequence}]/u");
-
-assertThrows("/[\\P{Emoji_Flag_Sequence}]/u");
-assertThrows("/[\\P{Emoji_Keycap_Sequence}]/u");
-assertThrows("/[\\P{Emoji_Modifier_Sequence}]/u");
-assertThrows("/[\\P{Emoji_Tag_Sequence}]/u");
-assertThrows("/[\\P{Emoji_ZWJ_Sequence}]/u");
-
-assertThrows("/[\\w\\p{Emoji_Flag_Sequence}]/u");
-assertThrows("/[\\w\\p{Emoji_Keycap_Sequence}]/u");
-assertThrows("/[\\w\\p{Emoji_Modifier_Sequence}]/u");
-assertThrows("/[\\w\\p{Emoji_Tag_Sequence}]/u");
-assertThrows("/[\\w\\p{Emoji_ZWJ_Sequence}]/u");
-
-assertThrows("/[\\w\\P{Emoji_Flag_Sequence}]/u");
-assertThrows("/[\\w\\P{Emoji_Keycap_Sequence}]/u");
-assertThrows("/[\\w\\P{Emoji_Modifier_Sequence}]/u");
-assertThrows("/[\\w\\P{Emoji_Tag_Sequence}]/u");
-assertThrows("/[\\w\\P{Emoji_ZWJ_Sequence}]/u");
-
-// Two regional indicators, but not a country.
-assertFalse(/\p{Emoji_Flag_Sequence}/u.test("\u{1F1E6}\u{1F1E6}"));
-
-// ZWJ sequence as in two ZWJ elements joined by a ZWJ, but not in the list.
-assertFalse(/\p{Emoji_ZWJ_Sequence}/u.test("\u{1F467}\u{200D}\u{1F468}"));
-
-// More complex regexp
-assertEquals(
- ["country flag: \u{1F1E6}\u{1F1F9}"],
- /Country Flag: \p{Emoji_Flag_Sequence}/iu.exec(
- "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
-assertEquals(
- ["country flag: \u{1F1E6}\u{1F1F9}", "\u{1F1E6}\u{1F1F9}"],
- /Country Flag: (\p{Emoji_Flag_Sequence})/iu.exec(
- "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
-assertEquals(
- ["country flag: \u{1F1E6}\u{1F1F9}"],
- /Country Flag: ..(?<=\p{Emoji_Flag_Sequence})/iu.exec(
- "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
-assertEquals(
- ["flag: \u{1F1E6}\u{1F1F9}", "\u{1F1E6}\u{1F1F9}"],
- /Flag: ..(?<=(\p{Emoji_Flag_Sequence})|\p{Emoji_Keycap_Sequence})/iu.exec(
- "this is an example of a country flag: \u{1F1E6}\u{1F1F9} is Austria"));
-
-// Partial sequences.
-assertFalse(/\p{Emoji_Flag_Sequence}/u.test("\u{1F1E6}_"));
-assertFalse(/\p{Emoji_Keycap_Sequence}/u.test("2\uFE0F_"));
-assertFalse(/\p{Emoji_Modifier_Sequence}/u.test("\u261D_"));
-assertFalse(/\p{Emoji_Tag_Sequence}/u.test("\u{1F3F4}\u{E0067}\u{E0062}\u{E0065}\u{E006E}\u{E0067}_"));
-assertFalse(/\p{Emoji_ZWJ_Sequence}/u.test("\u{1F468}\u200D\u2764\uFE0F\u200D_"));
diff --git a/deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js b/deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js
index ecc2a239f1..3816d37592 100644
--- a/deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js
+++ b/deps/v8/test/mjsunit/ic-migrated-map-add-when-monomorphic.js
@@ -13,7 +13,8 @@ function load(o) { return o.x }
%PrepareFunctionForOptimization(load);
// Initialize the load IC with a map that will not be deprecated.
-load(new A());
+var a = new A();
+load(a);
const oldB = new B();
(new B()).x = 1.5; // deprecates map
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index f6e4c20da2..37d427aa83 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -523,3 +523,8 @@ assertEquals('{"":"inf"}', JSON.stringify({"":Infinity}, reviver));
assertEquals([10.4, "\u1234"], JSON.parse("[10.4, \"\u1234\"]"));
assertEquals(10, JSON.parse('{"10":10}')["10"]);
+
+assertEquals(`[
+ 1,
+ 2
+]`, JSON.stringify([1,2], undefined, 1000000000000000));
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 0a19067cc6..5a9f713142 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -77,7 +77,7 @@
# Enable once serializing a running isolate is fully implemented.
'serialize-deserialize-now': [SKIP],
- # BUG(v8:9506): times out.
+ # BUG(v8:9506): slow tests.
'wasm/shared-memory-worker-explicit-gc-stress': [PASS, SLOW],
'wasm/shared-memory-worker-gc-stress': [PASS, SLOW],
@@ -211,11 +211,6 @@
'regress/regress-crbug-941743': [PASS, HEAVY],
'regress/regress-crbug-1191886': [PASS, HEAVY],
'wasm/externref-globals': [PASS, HEAVY],
-
- # BUG(v8:12173).
- 'compiler/call-with-arraylike-or-spread-7': [PASS, FAIL],
- 'ic-migrated-map-add-when-monomorphic': [PASS, FAIL],
- 'es6/map-constructor-entry-side-effect2': [PASS, FAIL]
}], # ALWAYS
##############################################################################
@@ -848,7 +843,6 @@
'regress/regress-490': [SKIP],
'regress/regress-create-exception': [SKIP],
'regress/regress-3247124': [SKIP],
- 'compiler/regress-1226988': [SKIP],
# Requires bigger stack size in the Genesis and if stack size is increased,
# the test requires too much time to run. However, the problem test covers
@@ -876,9 +870,6 @@
'regress/regress-1138075': [SKIP],
'regress/regress-1138611': [SKIP],
- # Some atomic functions are not yet implemented
- 'regress/wasm/regress-1196837': [SKIP],
-
 # SIMD is not yet implemented
'regress/wasm/regress-1054466': [SKIP],
'regress/wasm/regress-1065599': [SKIP],
@@ -1284,17 +1275,17 @@
}], # arch != x64 or deopt_fuzzer
##############################################################################
-# Liftoff is currently only sufficiently implemented on x64, ia32, arm64 and
-# arm.
-# TODO(clemensb): Implement on all other platforms (crbug.com/v8/6600).
-['arch not in (x64, ia32, arm64, arm)', {
+# Skip Liftoff tests on platforms that do not fully implement Liftoff.
+['arch not in (x64, ia32, arm64, arm, s390x)', {
'wasm/liftoff': [SKIP],
'wasm/liftoff-debug': [SKIP],
'wasm/tier-up-testing-flag': [SKIP],
'wasm/tier-down-to-liftoff': [SKIP],
'wasm/wasm-dynamic-tiering': [SKIP],
'wasm/test-partial-serialization': [SKIP],
-}], # arch not in (x64, ia32, arm64, arm)
+ 'regress/wasm/regress-1248024': [SKIP],
+ 'regress/wasm/regress-1251465': [SKIP],
+}], # arch not in (x64, ia32, arm64, arm, s390x)
##############################################################################
['system != linux or virtual_memory_cage == True', {
@@ -1438,6 +1429,7 @@
'wasm/liftoff-simd-params': [SKIP],
'wasm/multi-value-simd': [SKIP],
'wasm/simd-*': [SKIP],
+ 'regress/wasm/regress-9447': [SKIP],
'regress/wasm/regress-10309': [SKIP],
'regress/wasm/regress-10831': [SKIP],
'regress/wasm/regress-1054466': [SKIP],
@@ -1450,16 +1442,23 @@
'regress/wasm/regress-1124885': [SKIP],
'regress/wasm/regress-1132461': [SKIP],
'regress/wasm/regress-1161555': [SKIP],
+ 'regress/wasm/regress-1161654': [SKIP],
'regress/wasm/regress-1161954': [SKIP],
'regress/wasm/regress-1165966': [SKIP],
'regress/wasm/regress-1187831': [SKIP],
'regress/wasm/regress-1199662': [SKIP],
'regress/wasm/regress-1231950': [SKIP],
+ 'regress/wasm/regress-1237024': [SKIP],
'regress/wasm/regress-1242300': [SKIP],
'regress/wasm/regress-1242689': [SKIP],
}], # no_simd_hardware == True
##############################################################################
+['no_simd_hardware == False', {
+ 'regress/wasm/regress-1254675': [SKIP],
+}], # no_simd_hardware == False
+
+##############################################################################
# TODO(v8:11421): Port baseline compiler to other architectures.
['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, loong64)', {
'baseline/*': [SKIP],
@@ -1477,13 +1476,6 @@
'regress/regress-779407': [SKIP],
}], # variant == experimental_regexp
-##############################################################################
-['variant == instruction_scheduling or variant == stress_instruction_scheduling', {
- # BUG(12018): These tests currently fail with --turbo-instruction-scheduling.
- 'regress/wasm/regress-1231950': [SKIP],
- 'regress/wasm/regress-1242300': [SKIP],
-}], # variant == instruction_scheduling or variant == stress_instruction_scheduling
-
################################################################################
['single_generation', {
# These tests rely on allocation site tracking which only works in the young generation.
diff --git a/deps/v8/test/mjsunit/regress/asm/regress-1248677.js b/deps/v8/test/mjsunit/regress/asm/regress-1248677.js
new file mode 100644
index 0000000000..f18f5068df
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/asm/regress-1248677.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function setup_proxy() {
+ // Mess with the prototype to get funky conversion behavior.
+ Function.prototype.__proto__ = new Proxy(setup_proxy, {
+ get: async (target, key) => {
+ console.log(key);
+ }
+ });
+}
+
+setup_proxy();
+
+function asm(global, imports) {
+ 'use asm';
+ // Trigger proxy trap when looking up #toPrimitive:
+ var bar = +imports.bar;
+ function f() {}
+ return {f: f};
+}
+
+assertThrows(() => asm(undefined, {bar: setup_proxy}), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/asm/regress-1252747.js b/deps/v8/test/mjsunit/regress/asm/regress-1252747.js
new file mode 100644
index 0000000000..4a9497bfc4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/asm/regress-1252747.js
@@ -0,0 +1,27 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function print_stack(unsigned) {
+ print('stack:');
+ print((new Error()).stack);
+}
+
+function asm(global, env) {
+ 'use asm';
+
+ var print_stack = env.print_stack;
+
+ function main() {
+ var count = 0;
+
+ while ((count | 0) < 10) {
+ print_stack(1);
+ count = count + 1 | 0;
+ }
+ }
+
+ return main;
+}
+
+asm({}, {'print_stack': print_stack})();
diff --git a/deps/v8/test/mjsunit/regress/regress-1016450.js b/deps/v8/test/mjsunit/regress/regress-1016450.js
index a98f2744a5..9c722cfc0d 100644
--- a/deps/v8/test/mjsunit/regress/regress-1016450.js
+++ b/deps/v8/test/mjsunit/regress/regress-1016450.js
@@ -26,4 +26,6 @@ assertEquals(17n, f(2n));
assertEquals(16n, f(1n));
assertOptimized(f);
assertEquals(15n, f(0));
-assertUnoptimized(f);
+if (%Is64Bit()) {
+ assertUnoptimized(f);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1073440.js b/deps/v8/test/mjsunit/regress/regress-1073440.js
index 81c328e828..8d049af559 100644
--- a/deps/v8/test/mjsunit/regress/regress-1073440.js
+++ b/deps/v8/test/mjsunit/regress/regress-1073440.js
@@ -25,7 +25,9 @@ assertEquals(foo(1), 0);
assertOptimized(foo);
%PrepareFunctionForOptimization(foo);
assertEquals(foo(2), 1);
-assertUnoptimized(foo);
+if (%Is64Bit()) {
+ assertUnoptimized(foo);
+}
// Check that we learned something and do not loop deoptimizations.
%OptimizeFunctionOnNextCall(foo);
assertEquals(foo(1), 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-12256.js b/deps/v8/test/mjsunit/regress/regress-12256.js
deleted file mode 100644
index e6407c06ed..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-12256.js
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-const dates = [{ year: '2021', month: '10', day: '22', hour: '10', minute: '12', second: '32' },
- { year: '2021', month: '8', day: '3', hour: '9', minute: '9', second: '6' }];
-
-for (let date of dates) {
- const { year, month, day, hour, minute, second } = date;
- const s0 = `${year}-${month}-${day} ${hour}:${minute}:${second}Z`;
-
- // V8 reads at most kMaxSignificantDigits (9) to build the value of a numeral,
- // so let's test up to 9 leading zeros.
-
- // For years
- for (let i = 1; i < 10; i++) {
- const s1 = `${'0'.repeat(i) + year}-${month}-${day} ${hour}:${minute}:${second}Z`;
- assertTrue(new Date(s0).getTime() == new Date(s1).getTime());
- }
-
- // For months
- for (let i = 1; i < 10; i++) {
- const s1 = `${year}-${'0'.repeat(i) + month}-${day} ${hour}:${minute}:${second}Z`;
- assertTrue(new Date(s0).getTime() == new Date(s1).getTime());
- }
-
- // For days
- for (let i = 1; i < 10; i++) {
- const s1 = `${year}-${month}-${'0'.repeat(i) + day} ${hour}:${minute}:${second}Z`;
- assertTrue(new Date(s0).getTime() == new Date(s1).getTime());
- }
-
- // For hours
- for (let i = 1; i < 10; i++) {
- const s1 = `${year}-${month}-${day} ${'0'.repeat(i) + hour}:${minute}:${second}Z`;
- assertTrue(new Date(s0).getTime() == new Date(s1).getTime());
- }
-
- // For minutes
- for (let i = 1; i < 10; i++) {
- const s1 = `${year}-${month}-${day} ${hour}:${'0'.repeat(i) + minute}:${second}Z`;
- assertTrue(new Date(s0).getTime() == new Date(s1).getTime());
- }
-
- // For seconds
- for (let i = 1; i < 10; i++) {
- const s1 = `${year}-${month}-${day} ${hour}:${minute}:${'0'.repeat(i) + second}Z`;
- assertTrue(new Date(s0).getTime() == new Date(s1).getTime());
- }
-
- // With same input date string,
- // Date() and Date.parse() should return the same date
- assertTrue(new Date(s0).getTime() == Date.parse(s0));
-}
diff --git a/deps/v8/test/mjsunit/regress/regress-1238033.js b/deps/v8/test/mjsunit/regress/regress-1238033.js
new file mode 100644
index 0000000000..8d3e40f277
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1238033.js
@@ -0,0 +1,6 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+RegExp.prototype[Symbol.match] = null;
+'ab'.matchAll(/./); // Must not throw.
diff --git a/deps/v8/test/mjsunit/regress/regress-1254191.js b/deps/v8/test/mjsunit/regress/regress-1254191.js
new file mode 100644
index 0000000000..19a3062c86
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1254191.js
@@ -0,0 +1,18 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function f(a) {
+ let x = -1n;
+ if (!a) {
+ x = a;
+ }
+ x|0;
+}
+
+%PrepareFunctionForOptimization(f);
+f(false);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(() => f(true), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-353004.js b/deps/v8/test/mjsunit/regress/regress-353004.js
index f5430c6df4..a40eaff136 100644
--- a/deps/v8/test/mjsunit/regress/regress-353004.js
+++ b/deps/v8/test/mjsunit/regress/regress-353004.js
@@ -36,23 +36,6 @@ assertThrows(() =>
assertTrue(convertedOffset);
assertTrue(convertedLength);
-var buffer3 = new ArrayBuffer(100 * 1024 * 1024);
-var dataView1 = new DataView(buffer3, {valueOf : function() {
- %ArrayBufferDetach(buffer3);
- return 0;
-}});
-
-assertEquals(0, dataView1.byteLength);
-
-var buffer4 = new ArrayBuffer(100 * 1024);
-assertThrows(function() {
- var dataView2 = new DataView(buffer4, 0, {valueOf : function() {
- %ArrayBufferDetach(buffer4);
- return 100 * 1024 * 1024;
- }});
-}, RangeError);
-
-
var buffer5 = new ArrayBuffer(100 * 1024);
assertThrows(function() {
buffer5.slice({valueOf : function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-9441.js b/deps/v8/test/mjsunit/regress/regress-9441.js
index d2fb17a239..f342ec790c 100644
--- a/deps/v8/test/mjsunit/regress/regress-9441.js
+++ b/deps/v8/test/mjsunit/regress/regress-9441.js
@@ -14,7 +14,9 @@ assertEquals(-1n, foo(1n, 2n));
assertEquals(1n, foo(2n, 1n));
assertOptimized(foo);
assertThrows(() => foo(2n, undefined));
-assertUnoptimized(foo);
+if (%Is64Bit()) {
+ assertUnoptimized(foo);
+}
%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(-1n, foo(1n, 2n));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1248704.js b/deps/v8/test/mjsunit/regress/regress-crbug-1248704.js
new file mode 100644
index 0000000000..06ef707871
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1248704.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let evil = {
+ valueOf: function () {
+ array.length = 1;
+ }
+};
+let array = [1, 2, 3];
+let newArray = array.slice(evil);
+assertEquals(3, newArray.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1249941.js b/deps/v8/test/mjsunit/regress/regress-crbug-1249941.js
new file mode 100644
index 0000000000..0f1e6b1e85
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1249941.js
@@ -0,0 +1,16 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --always-opt
+
+(function() {
+ function foo() {
+ assertThrowsAsync(new Promise(() => { %DeoptimizeFunction(foo); throw new Error(); }));
+ }
+ %PrepareFunctionForOptimization(foo);
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1254704.js b/deps/v8/test/mjsunit/regress/regress-crbug-1254704.js
new file mode 100644
index 0000000000..222ee5f3dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1254704.js
@@ -0,0 +1,5 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(/[\k(]\1/.exec("ab(\1cd"), ["(\1"]);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-10602.js b/deps/v8/test/mjsunit/regress/regress-v8-10602.js
new file mode 100644
index 0000000000..37c4db2725
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-10602.js
@@ -0,0 +1,5 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(String.raw`/[\k](?<a>)/.exec()`);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12194.js b/deps/v8/test/mjsunit/regress/regress-v8-12194.js
new file mode 100644
index 0000000000..cf9c423416
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12194.js
@@ -0,0 +1,74 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt --no-stress-opt
+// Flags: --deopt-every-n-times=0 --no-force-slow-path
+
+(function TestSliceWithoutParams() {
+ let array = [0, 1, 2];
+
+ function f() {
+ let array2 = array.slice();
+ array2[1] = array2[0];
+ }
+
+ %PrepareFunctionForOptimization(f);
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(f);
+})();
+
+(function TestSliceWithStartZero() {
+ let array = [0, 1, 2];
+
+ function f() {
+ let array2 = array.slice(0);
+ array2[1] = array2[0];
+ }
+
+ %PrepareFunctionForOptimization(f);
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(f);
+})();
+
+(function TestSliceWithStartNonZero() {
+ let array = [0, 1, 2];
+
+ function f() {
+ let array2 = array.slice(1);
+ array2[1] = array2[0];
+ }
+
+ %PrepareFunctionForOptimization(f);
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(f);
+})();
+
+(function TestSliceWithStartZeroEndNonUndefined() {
+ let array = [0, 1, 2];
+
+ function f() {
+ let array2 = array.slice(0, 1);
+ array2[1] = array2[0];
+ }
+
+ %PrepareFunctionForOptimization(f);
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+
+ // Assert that the function was not deoptimized.
+ assertOptimized(f);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1237024.js b/deps/v8/test/mjsunit/regress/wasm/regress-1237024.js
index 04dd8018bf..8e5c8272b9 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-1237024.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1237024.js
@@ -14,7 +14,7 @@ builder.addFunction("main", kSig_i_i)
.addBody([
kExprLocalGet, 0,
kGCPrefix, kExprRttCanon, array_index,
- kGCPrefix, kExprArrayNewDefault, array_index,
+ kGCPrefix, kExprArrayNewDefaultWithRtt, array_index,
kGCPrefix, kExprArrayLen, array_index,
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1239116.js b/deps/v8/test/mjsunit/regress/wasm/regress-1239116.js
new file mode 100644
index 0000000000..58c6fbe0ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1239116.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, 1, true);
+builder.addFunction('main', kSig_i_v)
+ .addBody([
+ kExprI32Const, 0, // i32.const
+ kExprI32LoadMem8S, 0, 0, // i32.load8_s
+ kExprI32LoadMem, 0, 0, // i32.load
+ ])
+ .exportFunc();
+const instance = builder.instantiate();
+let mem = new Uint8Array(instance.exports.memory.buffer);
+mem[0] = -1;
+assertTraps(kTrapMemOutOfBounds, instance.exports.main);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1239116b.js b/deps/v8/test/mjsunit/regress/wasm/regress-1239116b.js
new file mode 100644
index 0000000000..10ce395964
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1239116b.js
@@ -0,0 +1,19 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(1, 1, true);
+builder.addFunction('main', kSig_i_v)
+ .addBody([
+ kExprI32Const, 0, // i32.const
+ kExprI32LoadMem16S, 0, 0, // i32.load16_s
+ kExprI32LoadMem, 0, 0, // i32.load
+ ])
+ .exportFunc();
+const instance = builder.instantiate();
+let mem = new Uint16Array(instance.exports.memory.buffer);
+mem[0] = -1;
+assertTraps(kTrapMemOutOfBounds, instance.exports.main);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1248024.js b/deps/v8/test/mjsunit/regress/wasm/regress-1248024.js
new file mode 100644
index 0000000000..d295a3974a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1248024.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --liftoff-only --trace-wasm-memory
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 17);
+// Generate function 1 (out of 3).
+builder.addFunction('load', kSig_i_v)
+ .addBody([
+ // body:
+ kExprI32Const, 0, // i32.const
+ kExprI32LoadMem8U, 0, 5, // i32.load8_u
+ ])
+ .exportFunc();
+const instance = builder.instantiate();
+instance.exports.load();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1251465.js b/deps/v8/test/mjsunit/regress/wasm/regress-1251465.js
new file mode 100644
index 0000000000..ae832bda83
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1251465.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-staging --experimental-wasm-gc --liftoff-only
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32, false, false);
+builder.addType(makeSig([], [kWasmI32]));
+builder.addTable(kWasmFuncRef, 1, 1, undefined)
+builder.addFunction(undefined, 0 /* sig */)
+ .addBodyWithEnd([
+kExprI32Const, 0x00,
+kExprI32Const, 0x00,
+kExprTableGet, 0x00,
+kExprI32Const, 0xff, 0x01,
+kNumericPrefix, kExprTableGrow, 0x00,
+kExprF32Const, 0x00, 0x00, 0x00, 0x00,
+kExprF32StoreMem, 0x00, 0x01,
+kExprEnd
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertThrows(() => instance.exports.main(), WebAssembly.RuntimeError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1254674.js b/deps/v8/test/mjsunit/regress/wasm/regress-1254674.js
new file mode 100644
index 0000000000..f56e1fef6f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1254674.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --print-wasm-code
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addImport('Math', 'sqrt', kSig_d_d);
+builder.instantiate({Math: Math});
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1254675.js b/deps/v8/test/mjsunit/regress/wasm/regress-1254675.js
new file mode 100644
index 0000000000..9df8f17211
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1254675.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This test should only be run on configurations that don't support Wasm SIMD.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Test case manually reduced from https://crbug.com/1254675.
+// This exercises a bug where we are missing checks for SIMD hardware support
+// when a function has a v128 parameter but doesn't use any SIMD instructions.
+(function() {
+ const builder = new WasmModuleBuilder();
+ builder.addType(kSig_i_s);
+ builder.addFunction(undefined, 0)
+ .addBodyWithEnd([kExprUnreachable, kExprEnd]);
+
+ assertThrows(() => builder.instantiate());
+}());
+
+// Additional test case to verify that a declared v128 local traps.
+(function() {
+ const builder = new WasmModuleBuilder();
+ builder.addType(kSig_i_i);
+ builder.addFunction(undefined, 0)
+ .addBodyWithEnd([kExprUnreachable, kExprEnd])
+ .addLocals('v128', 1);
+
+ assertThrows(() => builder.instantiate());
+}());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1255354.js b/deps/v8/test/mjsunit/regress/wasm/regress-1255354.js
new file mode 100644
index 0000000000..2a8d30e635
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1255354.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --no-liftoff
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+
+builder.addMemory(1, 1, /* exported = */ false);
+
+builder.addFunction("main", kSig_i_i).addBody([
+ kExprLocalGet, 0,
+ kExprIf, kWasmI32,
+ kExprLocalGet, 0,
+ kExprElse,
+ kExprI32Const, 42, // value
+ kExprI32Const, 0, // index
+ kExprI32StoreMem, 0, 0,
+ kExprI32Const, 11,
+ kExprLocalGet, 0,
+ kExprI32DivS,
+ kExprEnd
+ ]).exportFunc();
+
+var instance = builder.instantiate({});
diff --git a/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js b/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js
index 3e779af415..b24d097395 100644
--- a/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/resizablearraybuffer-growablesharedarraybuffer.js
@@ -6,6 +6,8 @@
"use strict";
+d8.file.execute('test/mjsunit/typedarray-helpers.js');
+
function CreateResizableArrayBuffer(byteLength, maxByteLength) {
return new ArrayBuffer(byteLength, {maxByteLength: maxByteLength});
}
@@ -52,11 +54,11 @@ function growHelper(ab, value) {
assertEquals(0, rab.maxByteLength);
})();
-const ctors = [[ArrayBuffer, (b) => b.resizable],
- [SharedArrayBuffer, (b) => b.growable]];
+const arrayBufferCtors = [[ArrayBuffer, (b) => b.resizable],
+ [SharedArrayBuffer, (b) => b.growable]];
(function TestOptionsBagNotObject() {
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
const buffer = new ctor(10, 'this is not an options bag');
assertFalse(resizable(buffer));
}
@@ -66,7 +68,7 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
let evil = {};
Object.defineProperty(evil, 'maxByteLength',
{get: () => { throw new Error('thrown'); }});
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
let caught = false;
try {
new ctor(10, evil);
@@ -79,14 +81,14 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
})();
(function TestMaxByteLengthNonExisting() {
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
const buffer = new ctor(10, {});
assertFalse(resizable(buffer));
}
})();
(function TestMaxByteLengthUndefinedOrNan() {
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
const buffer1 = new ctor(10, {maxByteLength: undefined});
assertFalse(resizable(buffer1));
const buffer2 = new ctor(0, {maxByteLength: NaN});
@@ -97,7 +99,7 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
})();
(function TestMaxByteLengthBooleanNullOrString() {
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
const buffer1 = new ctor(0, {maxByteLength: true});
assertTrue(resizable(buffer1));
assertEquals(0, buffer1.byteLength);
@@ -118,7 +120,7 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
})();
(function TestMaxByteLengthDouble() {
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
const buffer1 = new ctor(0, {maxByteLength: -0.0});
assertTrue(resizable(buffer1));
assertEquals(0, buffer1.byteLength);
@@ -138,7 +140,7 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
(function TestMaxByteLengthThrows() {
const evil = {valueOf: () => { throw new Error('thrown');}};
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
let caught = false;
try {
new ctor(0, {maxByteLength: evil});
@@ -153,7 +155,7 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
(function TestByteLengthThrows() {
const evil1 = {valueOf: () => { throw new Error('byteLength throws');}};
const evil2 = {valueOf: () => { throw new Error('maxByteLength throws');}};
- for (let [ctor, resizable] of ctors) {
+ for (let [ctor, resizable] of arrayBufferCtors) {
let caught = false;
try {
new ctor(evil1, {maxByteLength: evil2});
@@ -544,3 +546,73 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
assertEquals('ok', w.getMessage());
assertEquals(15, gsab.byteLength);
})();
+
+(function Slice() {
+ const rab = CreateResizableArrayBuffer(10, 20);
+ const sliced1 = rab.slice();
+ assertEquals(10, sliced1.byteLength);
+ assertTrue(sliced1 instanceof ArrayBuffer);
+ assertFalse(sliced1 instanceof SharedArrayBuffer);
+ assertFalse(sliced1.resizable);
+
+ const gsab = CreateGrowableSharedArrayBuffer(10, 20);
+ const sliced2 = gsab.slice();
+ assertEquals(10, sliced2.byteLength);
+ assertFalse(sliced2 instanceof ArrayBuffer);
+ assertTrue(sliced2 instanceof SharedArrayBuffer);
+ assertFalse(sliced2.growable);
+})();
+
+(function SliceSpeciesConstructorReturnsResizable() {
+ class MyArrayBuffer extends ArrayBuffer {
+ static get [Symbol.species]() { return MyResizableArrayBuffer; }
+ }
+
+ class MyResizableArrayBuffer extends ArrayBuffer {
+ constructor(byteLength) {
+ super(byteLength, {maxByteLength: byteLength * 2});
+ }
+ }
+
+ const ab = new MyArrayBuffer(20);
+ const sliced1 = ab.slice();
+ assertTrue(sliced1.resizable);
+
+ class MySharedArrayBuffer extends SharedArrayBuffer {
+ static get [Symbol.species]() { return MyGrowableSharedArrayBuffer; }
+ }
+
+ class MyGrowableSharedArrayBuffer extends SharedArrayBuffer {
+ constructor(byteLength) {
+ super(byteLength, {maxByteLength: byteLength * 2});
+ }
+ }
+
+ const sab = new MySharedArrayBuffer(20);
+ const sliced2 = sab.slice();
+ assertTrue(sliced2.growable);
+})();
+
+(function SliceSpeciesConstructorResizes() {
+ let rab;
+ let resizeWhenConstructorCalled = false;
+ class MyArrayBuffer extends ArrayBuffer {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(2);
+ }
+ }
+ }
+ rab = new MyArrayBuffer(4, {maxByteLength: 8});
+ const taWrite = new Uint8Array(rab);
+ for (let i = 0; i < 4; ++i) {
+ taWrite[i] = 1;
+ }
+ assertEquals([1, 1, 1, 1], ToNumbers(taWrite));
+ resizeWhenConstructorCalled = true;
+ const sliced = rab.slice();
+ assertEquals(2, rab.byteLength);
+ assertEquals(4, sliced.byteLength);
+ assertEquals([1, 1, 0, 0], ToNumbers(new Uint8Array(sliced)));
+})();
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log b/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log
index 6aa49159f9..5c966d1c32 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test-large.log
@@ -171,7 +171,7 @@ code-creation,Builtin,2,5847,0x557426348760,1112,DeleteProperty
code-creation,Builtin,2,5859,0x557426348bc0,1972,CopyDataProperties
code-creation,Builtin,2,5871,0x557426349380,10024,SetDataProperties
code-creation,Builtin,2,5883,0x55742634bac0,28,Abort
-code-creation,Builtin,2,5895,0x55742634bae0,28,AbortCSAAssert
+code-creation,Builtin,2,5895,0x55742634bae0,28,AbortCSADcheck
code-creation,Builtin,2,5907,0x55742634bb00,12,EmptyFunction
code-creation,Builtin,2,5922,0x55742634bb20,12,Illegal
code-creation,Builtin,2,5934,0x55742634bb40,12,StrictPoisonPillThrower
diff --git a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
index fe18f4649c..0bf84a5a72 100644
--- a/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-growablesharedarraybuffer.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-rab-gsab --allow-natives-syntax
+// Flags: --harmony-relative-indexing-methods
"use strict";
@@ -398,7 +399,7 @@ function CreateGrowableSharedArrayBuffer(byteLength, maxByteLength) {
// We can use the same GSAB for all the TAs below, since we won't modify it
// after writing the initial values.
const gsab = CreateGrowableSharedArrayBuffer(buffer_byte_length,
- 2 * buffer_byte_length);
+ 2 * buffer_byte_length);
const byte_offset = offset * ctor.BYTES_PER_ELEMENT;
// Write some data into the array.
@@ -461,7 +462,12 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
let values = [];
let grown = false;
for (const value of ta) {
- values.push(Number(value));
+ if (value instanceof Array) {
+ // When iterating via entries(), the values will be arrays [key, value].
+ values.push([value[0], Number(value[1])]);
+ } else {
+ values.push(Number(value));
+ }
if (!grown && values.length == grow_after) {
gsab.grow(new_byte_length);
grown = true;
@@ -571,7 +577,7 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
(function Destructuring() {
for (let ctor of ctors) {
const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
- 8 * ctor.BYTES_PER_ELEMENT);
+ 8 * ctor.BYTES_PER_ELEMENT);
const fixedLength = new ctor(gsab, 0, 4);
const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
const lengthTracking = new ctor(gsab, 0);
@@ -639,7 +645,7 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
(function TestFill() {
for (let ctor of ctors) {
const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
- 8 * ctor.BYTES_PER_ELEMENT);
+ 8 * ctor.BYTES_PER_ELEMENT);
const fixedLength = new ctor(gsab, 0, 4);
const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
const lengthTracking = new ctor(gsab, 0);
@@ -687,3 +693,686 @@ function TestIterationAndGrow(ta, expected, gsab, grow_after,
assertEquals([15, 19, 19, 20, 16, 16], ReadDataFromBuffer(gsab, ctor));
}
})();
+
+(function At() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ let ta_write = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(ta_write, i, i);
+ }
+
+ assertEquals(3, AtHelper(fixedLength, -1));
+ assertEquals(3, AtHelper(lengthTracking, -1));
+ assertEquals(3, AtHelper(fixedLengthWithOffset, -1));
+ assertEquals(3, AtHelper(lengthTrackingWithOffset, -1));
+
+ // Grow. New memory is zeroed.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ assertEquals(3, AtHelper(fixedLength, -1));
+ assertEquals(0, AtHelper(lengthTracking, -1));
+ assertEquals(3, AtHelper(fixedLengthWithOffset, -1));
+ assertEquals(0, AtHelper(lengthTrackingWithOffset, -1));
+ }
+})();
+
+(function Slice() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ const fixedLengthSlice = fixedLength.slice();
+ assertEquals([0, 1, 2, 3], ToNumbers(fixedLengthSlice));
+ assertTrue(fixedLengthSlice.buffer instanceof ArrayBuffer);
+ assertFalse(fixedLengthSlice.buffer instanceof SharedArrayBuffer);
+ assertFalse(fixedLengthSlice.buffer.resizable);
+
+ const fixedLengthWithOffsetSlice = fixedLengthWithOffset.slice();
+ assertEquals([2, 3], ToNumbers(fixedLengthWithOffsetSlice));
+ assertTrue(fixedLengthWithOffsetSlice.buffer instanceof ArrayBuffer);
+ assertFalse(fixedLengthWithOffsetSlice.buffer instanceof SharedArrayBuffer);
+ assertFalse(fixedLengthWithOffsetSlice.buffer.resizable);
+
+ const lengthTrackingSlice = lengthTracking.slice();
+ assertEquals([0, 1, 2, 3], ToNumbers(lengthTrackingSlice));
+ assertTrue(lengthTrackingSlice.buffer instanceof ArrayBuffer);
+ assertFalse(lengthTrackingSlice.buffer instanceof SharedArrayBuffer);
+ assertFalse(lengthTrackingSlice.buffer.resizable);
+
+ const lengthTrackingWithOffsetSlice = lengthTrackingWithOffset.slice();
+ assertEquals([2, 3], ToNumbers(lengthTrackingWithOffsetSlice));
+ assertTrue(lengthTrackingWithOffsetSlice.buffer instanceof ArrayBuffer);
+ assertFalse(lengthTrackingWithOffsetSlice.buffer instanceof
+ SharedArrayBuffer);
+ assertFalse(lengthTrackingWithOffsetSlice.buffer.resizable);
+
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ assertEquals([0, 1, 2, 3], ToNumbers(fixedLength.slice()));
+ assertEquals([2, 3], ToNumbers(fixedLengthWithOffset.slice()));
+ assertEquals([0, 1, 2, 3, 0, 0], ToNumbers(lengthTracking.slice()));
+ assertEquals([2, 3, 0, 0], ToNumbers(lengthTrackingWithOffset.slice()));
+
+ // Verify that the previously created slices aren't affected by the growing.
+ assertEquals([0, 1, 2, 3], ToNumbers(fixedLengthSlice));
+ assertEquals([2, 3], ToNumbers(fixedLengthWithOffsetSlice));
+ assertEquals([0, 1, 2, 3], ToNumbers(lengthTrackingSlice));
+ assertEquals([2, 3], ToNumbers(lengthTrackingWithOffsetSlice));
+ }
+})();
+
+(function SliceSpeciesCreateResizes() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 1);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const fixedLength = new MyArray(gsab, 0, 4);
+ resizeWhenConstructorCalled = true;
+ const a = fixedLength.slice();
+ assertEquals(4, a.length);
+ assertEquals([1, 1, 1, 1], ToNumbers(a));
+
+ assertEquals(6 * ctor.BYTES_PER_ELEMENT, gsab.byteLength);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 1);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(gsab);
+ resizeWhenConstructorCalled = true;
+ const a = lengthTracking.slice();
+ assertEquals(6 * ctor.BYTES_PER_ELEMENT, gsab.byteLength);
+ // The length of the resulting TypedArray is determined before
+ // TypedArraySpeciesCreate is called, and it doesn't change.
+ assertEquals(4, a.length);
+ assertEquals([1, 1, 1, 1], ToNumbers(a));
+ }
+})();
+
+(function CopyWithin() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // Orig. array: [0, 1, 2, 3]
+ // [0, 1, 2, 3] << fixedLength
+ // [2, 3] << fixedLengthWithOffset
+ // [0, 1, 2, 3, ...] << lengthTracking
+ // [2, 3, ...] << lengthTrackingWithOffset
+
+ fixedLength.copyWithin(0, 2);
+ assertEquals([2, 3, 2, 3], ToNumbers(fixedLength));
+
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ fixedLengthWithOffset.copyWithin(0, 1);
+ assertEquals([3, 3], ToNumbers(fixedLengthWithOffset));
+
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ lengthTracking.copyWithin(0, 2);
+ assertEquals([2, 3, 2, 3], ToNumbers(lengthTracking));
+
+ lengthTrackingWithOffset.copyWithin(0, 1);
+ assertEquals([3, 3], ToNumbers(lengthTrackingWithOffset));
+
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // Orig. array: [0, 1, 2, 3, 4, 5]
+ // [0, 1, 2, 3] << fixedLength
+ // [2, 3] << fixedLengthWithOffset
+ // [0, 1, 2, 3, 4, 5, ...] << lengthTracking
+ // [2, 3, 4, 5, ...] << lengthTrackingWithOffset
+
+ fixedLength.copyWithin(0, 2);
+ assertEquals([2, 3, 2, 3], ToNumbers(fixedLength));
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ fixedLengthWithOffset.copyWithin(0, 1);
+ assertEquals([3, 3], ToNumbers(fixedLengthWithOffset));
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // [0, 1, 2, 3, 4, 5, ...] << lengthTracking
+ // target ^ ^ start
+ lengthTracking.copyWithin(0, 2);
+ assertEquals([2, 3, 4, 5, 4, 5], ToNumbers(lengthTracking));
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // [2, 3, 4, 5, ...] << lengthTrackingWithOffset
+ // target ^ ^ start
+ lengthTrackingWithOffset.copyWithin(0, 1);
+ assertEquals([3, 4, 5, 5], ToNumbers(lengthTrackingWithOffset));
+ }
+})();
+
+(function CopyWithinParameterConversionGrows() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ const evil = { valueOf: () => { gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ WriteToTypedArray(lengthTracking, 4, 4);
+ WriteToTypedArray(lengthTracking, 5, 5);
+ return 0;} };
+ // Orig. array: [0, 1, 2, 3] [4, 5]
+ // ^ ^ ^ new elements
+ // target start
+ lengthTracking.copyWithin(evil, 2);
+ assertEquals([2, 3, 2, 3, 4, 5], ToNumbers(lengthTracking));
+ }
+})();
+
+(function EntriesKeysValues() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], ToNumbersWithEntries(fixedLength));
+ assertEquals([0, 2, 4, 6], ValuesToNumbers(fixedLength));
+ assertEquals([0, 1, 2, 3], Keys(fixedLength));
+
+ assertEquals([4, 6], ToNumbersWithEntries(fixedLengthWithOffset));
+ assertEquals([4, 6], ValuesToNumbers(fixedLengthWithOffset));
+ assertEquals([0, 1], Keys(fixedLengthWithOffset));
+
+ assertEquals([0, 2, 4, 6], ToNumbersWithEntries(lengthTracking));
+ assertEquals([0, 2, 4, 6], ValuesToNumbers(lengthTracking));
+ assertEquals([0, 1, 2, 3], Keys(lengthTracking));
+
+ assertEquals([4, 6], ToNumbersWithEntries(lengthTrackingWithOffset));
+ assertEquals([4, 6], ValuesToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 1], Keys(lengthTrackingWithOffset));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], ToNumbersWithEntries(fixedLength));
+ assertEquals([0, 2, 4, 6], ValuesToNumbers(fixedLength));
+ assertEquals([0, 1, 2, 3], Keys(fixedLength));
+
+ assertEquals([4, 6], ToNumbersWithEntries(fixedLengthWithOffset));
+ assertEquals([4, 6], ValuesToNumbers(fixedLengthWithOffset));
+ assertEquals([0, 1], Keys(fixedLengthWithOffset));
+
+ assertEquals([0, 2, 4, 6, 8, 10], ToNumbersWithEntries(lengthTracking));
+ assertEquals([0, 2, 4, 6, 8, 10], ValuesToNumbers(lengthTracking));
+ assertEquals([0, 1, 2, 3, 4, 5], Keys(lengthTracking));
+
+ assertEquals([4, 6, 8, 10], ToNumbersWithEntries(lengthTrackingWithOffset));
+ assertEquals([4, 6, 8, 10], ValuesToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 1, 2, 3], Keys(lengthTrackingWithOffset));
+ }
+})();
+
+(function EntriesKeysValuesGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateGsabForTest(ctor) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return gsab;
+ }
+
+ // Iterating with entries() (the 4 loops below).
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndGrow(fixedLength.entries(),
+ [[0, 0], [1, 2], [2, 4], [3, 6]],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndGrow(fixedLengthWithOffset.entries(),
+ [[0, 4], [1, 6]],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+
+ TestIterationAndGrow(lengthTracking.entries(),
+ [[0, 0], [1, 2], [2, 4], [3, 6], [4, 0], [5, 0]],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndGrow(lengthTrackingWithOffset.entries(),
+ [[0, 4], [1, 6], [2, 0], [3, 0]],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ // Iterating with keys() (the 4 loops below).
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndGrow(fixedLength.keys(),
+ [0, 1, 2, 3],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndGrow(fixedLengthWithOffset.keys(),
+ [0, 1],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+
+ TestIterationAndGrow(lengthTracking.keys(),
+ [0, 1, 2, 3, 4, 5],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndGrow(lengthTrackingWithOffset.keys(),
+ [0, 1, 2, 3],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ // Iterating with values() (the 4 loops below).
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndGrow(fixedLength.values(),
+ [0, 2, 4, 6],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndGrow(fixedLengthWithOffset.values(),
+ [4, 6],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+
+ TestIterationAndGrow(lengthTracking.values(),
+ [0, 2, 4, 6, 0, 0],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndGrow(lengthTrackingWithOffset.values(),
+ [4, 6, 0, 0],
+ gsab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+})();
+
+(function EverySome() {
+ for (let ctor of ctors) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(gsab, 0, 4);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(gsab, 0);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ function div3(n) {
+ return Number(n) % 3 == 0;
+ }
+
+ function even(n) {
+ return Number(n) % 2 == 0;
+ }
+
+ function over10(n) {
+ return Number(n) > 10;
+ }
+
+ assertFalse(fixedLength.every(div3));
+ assertTrue(fixedLength.every(even));
+ assertTrue(fixedLength.some(div3));
+ assertFalse(fixedLength.some(over10));
+
+ assertFalse(fixedLengthWithOffset.every(div3));
+ assertTrue(fixedLengthWithOffset.every(even));
+ assertTrue(fixedLengthWithOffset.some(div3));
+ assertFalse(fixedLengthWithOffset.some(over10));
+
+ assertFalse(lengthTracking.every(div3));
+ assertTrue(lengthTracking.every(even));
+ assertTrue(lengthTracking.some(div3));
+ assertFalse(lengthTracking.some(over10));
+
+ assertFalse(lengthTrackingWithOffset.every(div3));
+ assertTrue(lengthTrackingWithOffset.every(even));
+ assertTrue(lengthTrackingWithOffset.some(div3));
+ assertFalse(lengthTrackingWithOffset.some(over10));
+
+ // Grow.
+ gsab.grow(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertFalse(fixedLength.every(div3));
+ assertTrue(fixedLength.every(even));
+ assertTrue(fixedLength.some(div3));
+ assertFalse(fixedLength.some(over10));
+
+ assertFalse(fixedLengthWithOffset.every(div3));
+ assertTrue(fixedLengthWithOffset.every(even));
+ assertTrue(fixedLengthWithOffset.some(div3));
+ assertFalse(fixedLengthWithOffset.some(over10));
+
+ assertFalse(lengthTracking.every(div3));
+ assertTrue(lengthTracking.every(even));
+ assertTrue(lengthTracking.some(div3));
+ assertFalse(lengthTracking.some(over10));
+
+ assertFalse(lengthTrackingWithOffset.every(div3));
+ assertTrue(lengthTrackingWithOffset.every(even));
+ assertTrue(lengthTrackingWithOffset.some(div3));
+ assertFalse(lengthTrackingWithOffset.some(over10));
+ }
+})();
+
+(function EveryGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateGsabForTest(ctor) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return gsab;
+ }
+
+ let values;
+ let gsab;
+ let growAfter;
+ let growTo;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == growAfter) {
+ gsab.grow(growTo);
+ }
+ return true;
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+ values = [];
+ growAfter = 2;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(fixedLength.every(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ growAfter = 1;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(fixedLengthWithOffset.every(myFunc));
+ assertEquals([4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+ values = [];
+ growAfter = 2;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(lengthTracking.every(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ growAfter = 1;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(lengthTrackingWithOffset.every(myFunc));
+ assertEquals([4, 6], values);
+ }
+})();
+
+(function SomeGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateGsabForTest(ctor) {
+ const gsab = CreateGrowableSharedArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(gsab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return gsab;
+ }
+
+ let values;
+ let gsab;
+ let growAfter;
+ let growTo;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == growAfter) {
+ gsab.grow(growTo);
+ }
+ return false;
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLength = new ctor(gsab, 0, 4);
+ values = [];
+ growAfter = 2;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(fixedLength.some(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ gsab = gsab;
+ growAfter = 1;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(fixedLengthWithOffset.some(myFunc));
+ assertEquals([4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTracking = new ctor(gsab, 0);
+ values = [];
+ growAfter = 2;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(lengthTracking.some(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ gsab = CreateGsabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(gsab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ growAfter = 1;
+ growTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(lengthTrackingWithOffset.some(myFunc));
+ assertEquals([4, 6], values);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/typedarray-helpers.js b/deps/v8/test/mjsunit/typedarray-helpers.js
index e4996456c6..4482d548ba 100644
--- a/deps/v8/test/mjsunit/typedarray-helpers.js
+++ b/deps/v8/test/mjsunit/typedarray-helpers.js
@@ -20,6 +20,10 @@ const ctors = [
MyBigInt64Array,
];
+function CreateResizableArrayBuffer(byteLength, maxByteLength) {
+ return new ArrayBuffer(byteLength, {maxByteLength: maxByteLength});
+}
+
function ReadDataFromBuffer(ab, ctor) {
let result = [];
const ta = new ctor(ab, 0, ab.byteLength / ctor.BYTES_PER_ELEMENT);
@@ -46,10 +50,45 @@ function ToNumbers(array) {
return result;
}
-function FillHelper(ta, n, start, end) {
- if (ta instanceof BigInt64Array || ta instanceof BigUint64Array) {
- ta.fill(BigInt(n), start, end);
+function ToNumbersWithEntries(array) {
+ let result = [];
+ let expectedKey = 0;
+ for (let [key, value] of array.entries()) {
+ assertEquals(expectedKey, key);
+ ++expectedKey;
+ result.push(Number(value));
+ }
+ return result;
+}
+
+function Keys(array) {
+ let result = [];
+ for (let key of array.keys()) {
+ result.push(key);
+ }
+ return result;
+}
+
+function ValuesToNumbers(array) {
+ let result = [];
+ for (let value of array.values()) {
+ result.push(Number(value));
+ }
+ return result;
+}
+
+function AtHelper(array, index) {
+ let result = array.at(index);
+ if (typeof result == 'bigint') {
+ return Number(result);
+ }
+ return result;
+}
+
+function FillHelper(array, n, start, end) {
+ if (array instanceof BigInt64Array || array instanceof BigUint64Array) {
+ array.fill(BigInt(n), start, end);
} else {
- ta.fill(n, start, end);
+ array.fill(n, start, end);
}
}
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
index 69ad91e693..f68cad1111 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer-detach.js
@@ -6,25 +6,7 @@
"use strict";
-class MyUint8Array extends Uint8Array {};
-
-const ctors = [
- Uint8Array,
- Int8Array,
- Uint16Array,
- Int16Array,
- Int32Array,
- Float32Array,
- Float64Array,
- Uint8ClampedArray,
- BigUint64Array,
- BigInt64Array,
- MyUint8Array
-];
-
-function CreateResizableArrayBuffer(byteLength, maxByteLength) {
- return new ArrayBuffer(byteLength, {maxByteLength: maxByteLength});
-}
+d8.file.execute('test/mjsunit/typedarray-helpers.js');
(function ConstructorThrowsIfBufferDetached() {
const rab = CreateResizableArrayBuffer(40, 80);
@@ -140,3 +122,169 @@ function CreateResizableArrayBuffer(byteLength, maxByteLength) {
assertEquals(undefined, i8a[2]);
}
})();
+
+(function FillParameterConversionDetaches() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => { %ArrayBufferDetach(rab); return 1;}};
+ // The length is read after converting the first parameter ('value'), so the
+ // detaching parameter has to be the 2nd ('start') or 3rd ('end').
+ assertThrows(function() {
+ FillHelper(fixedLength, 1, 0, evil);
+ }, TypeError);
+ }
+})();
+
+(function CopyWithinParameterConversionDetaches() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => { %ArrayBufferDetach(rab); return 2;}};
+ assertThrows(function() {
+ fixedLength.copyWithin(evil, 0, 1);
+ }, TypeError);
+ }
+})();
+
+(function EveryDetachMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let detachAfter;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == detachAfter) {
+ %ArrayBufferDetach(rab);
+ }
+ return true;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ values = [];
+ detachAfter = 2;
+ assertTrue(fixedLength.every(myFunc));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ detachAfter = 1;
+ assertTrue(fixedLengthWithOffset.every(myFunc));
+ assertEquals([4, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ detachAfter = 2;
+ assertTrue(lengthTracking.every(myFunc));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ detachAfter = 1;
+ assertTrue(lengthTrackingWithOffset.every(myFunc));
+ assertEquals([4, undefined], values);
+ }
+})();
+
+(function SomeDetachMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let detachAfter;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == detachAfter) {
+ %ArrayBufferDetach(rab);
+ }
+ return false;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ values = [];
+ detachAfter = 2;
+ assertFalse(fixedLength.some(myFunc));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ detachAfter = 1;
+ assertFalse(fixedLengthWithOffset.some(myFunc));
+ assertEquals([4, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ detachAfter = 2;
+ assertFalse(lengthTracking.some(myFunc));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ detachAfter = 1;
+ assertFalse(lengthTrackingWithOffset.some(myFunc));
+ assertEquals([4, undefined], values);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
index 9934683b23..8aea26c7b3 100644
--- a/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
+++ b/deps/v8/test/mjsunit/typedarray-resizablearraybuffer.js
@@ -3,15 +3,12 @@
// found in the LICENSE file.
// Flags: --harmony-rab-gsab --allow-natives-syntax
+// Flags: --harmony-relative-indexing-methods
"use strict";
d8.file.execute('test/mjsunit/typedarray-helpers.js');
-function CreateResizableArrayBuffer(byteLength, maxByteLength) {
- return new ArrayBuffer(byteLength, {maxByteLength: maxByteLength});
-}
-
(function TypedArrayPrototype() {
const rab = CreateResizableArrayBuffer(40, 80);
const ab = new ArrayBuffer(80);
@@ -747,7 +744,12 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
let values = [];
let resized = false;
for (const value of ta) {
- values.push(Number(value));
+ if (value instanceof Array) {
+ // When iterating via entries(), the values will be arrays [key, value].
+ values.push([value[0], Number(value[1])]);
+ } else {
+ values.push(Number(value));
+ }
if (!resized && values.length == resize_after) {
rab.resize(new_byte_length);
resized = true;
@@ -1156,3 +1158,1263 @@ function TestIterationAndResize(ta, expected, rab, resize_after,
assertThrows(() => { FillHelper(fixedLength, 3, 1, evil); }, TypeError);
}
})();
+
+(function At() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ let ta_write = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(ta_write, i, i);
+ }
+
+ assertEquals(3, AtHelper(fixedLength, -1));
+ assertEquals(3, AtHelper(lengthTracking, -1));
+ assertEquals(3, AtHelper(fixedLengthWithOffset, -1));
+ assertEquals(3, AtHelper(lengthTrackingWithOffset, -1));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { AtHelper(fixedLength, -1); });
+ assertThrows(() => { AtHelper(fixedLengthWithOffset, -1); });
+
+ assertEquals(2, AtHelper(lengthTracking, -1));
+ assertEquals(2, AtHelper(lengthTrackingWithOffset, -1));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { AtHelper(fixedLength, -1); });
+ assertThrows(() => { AtHelper(fixedLengthWithOffset, -1); });
+ assertEquals(0, AtHelper(lengthTracking, -1));
+ assertThrows(() => { AtHelper(lengthTrackingWithOffset, -1); });
+
+ // Grow so that all TAs are back in-bounds. New memory is zeroed.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ assertEquals(0, AtHelper(fixedLength, -1));
+ assertEquals(0, AtHelper(lengthTracking, -1));
+ assertEquals(0, AtHelper(fixedLengthWithOffset, -1));
+ assertEquals(0, AtHelper(lengthTrackingWithOffset, -1));
+ }
+})();
+
+(function AtParameterConversionResizes() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ let evil = { valueOf: () => { rab.resize(2); return 0;}};
+ assertEquals(undefined, AtHelper(fixedLength, evil));
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+
+ let evil = { valueOf: () => { rab.resize(2); return -1;}};
+ // The TypedArray is *not* out of bounds since it's length-tracking.
+ assertEquals(undefined, AtHelper(lengthTracking, evil));
+ }
+})();
+
+(function Slice() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ const fixedLengthSlice = fixedLength.slice();
+ assertEquals([0, 1, 2, 3], ToNumbers(fixedLengthSlice));
+ assertFalse(fixedLengthSlice.buffer.resizable);
+
+ const fixedLengthWithOffsetSlice = fixedLengthWithOffset.slice();
+ assertEquals([2, 3], ToNumbers(fixedLengthWithOffsetSlice));
+ assertFalse(fixedLengthWithOffsetSlice.buffer.resizable);
+
+ const lengthTrackingSlice = lengthTracking.slice();
+ assertEquals([0, 1, 2, 3], ToNumbers(lengthTrackingSlice));
+ assertFalse(lengthTrackingSlice.buffer.resizable);
+
+ const lengthTrackingWithOffsetSlice = lengthTrackingWithOffset.slice();
+ assertEquals([2, 3], ToNumbers(lengthTrackingWithOffsetSlice));
+ assertFalse(lengthTrackingWithOffsetSlice.buffer.resizable);
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { fixedLength.slice(); });
+ assertThrows(() => { fixedLengthWithOffset.slice(); });
+ assertEquals([0, 1, 2], ToNumbers(lengthTracking.slice()));
+ assertEquals([2], ToNumbers(lengthTrackingWithOffset.slice()));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { fixedLength.slice(); });
+ assertThrows(() => { fixedLengthWithOffset.slice(); });
+ assertEquals([0], ToNumbers(lengthTracking.slice()));
+ assertThrows(() => { lengthTrackingWithOffset.slice(); });
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { fixedLength.slice(); });
+ assertThrows(() => { fixedLengthWithOffset.slice(); });
+ assertEquals([], ToNumbers(lengthTracking.slice()));
+ assertThrows(() => { lengthTrackingWithOffset.slice(); });
+
+ // Verify that the previously created slices aren't affected by the
+ // shrinking.
+ assertEquals([0, 1, 2, 3], ToNumbers(fixedLengthSlice));
+ assertEquals([2, 3], ToNumbers(fixedLengthWithOffsetSlice));
+ assertEquals([0, 1, 2, 3], ToNumbers(lengthTrackingSlice));
+ assertEquals([2, 3], ToNumbers(lengthTrackingWithOffsetSlice));
+
+ // Grow so that all TAs are back in-bounds. New memory is zeroed.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ assertEquals([0, 0, 0, 0], ToNumbers(fixedLength.slice()));
+ assertEquals([0, 0], ToNumbers(fixedLengthWithOffset.slice()));
+ assertEquals([0, 0, 0, 0, 0, 0], ToNumbers(lengthTracking.slice()));
+ assertEquals([0, 0, 0, 0], ToNumbers(lengthTrackingWithOffset.slice()));
+ }
+})();
+
+(function SliceSpeciesCreateResizes() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const fixedLength = new MyArray(rab, 0, 4);
+ resizeWhenConstructorCalled = true;
+ assertThrows(() => { fixedLength.slice(); }, TypeError);
+ assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 1);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(rab);
+ resizeWhenConstructorCalled = true;
+ const a = lengthTracking.slice();
+ assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
+ // The length of the resulting TypedArray is determined before
+ // TypedArraySpeciesCreate is called, and it doesn't change.
+ assertEquals(4, a.length);
+ assertEquals([1, 1, 0, 0], ToNumbers(a));
+ }
+
+ // Test that the (start, end) parameters are computed based on the original
+ // length.
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 1);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends ctor {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(rab);
+ resizeWhenConstructorCalled = true;
+ const a = lengthTracking.slice(-3, -1);
+ assertEquals(2 * ctor.BYTES_PER_ELEMENT, rab.byteLength);
+ // The length of the resulting TypedArray is determined before
+ // TypedArraySpeciesCreate is called, and it doesn't change.
+ assertEquals(2, a.length);
+ assertEquals([1, 0], ToNumbers(a));
+ }
+
+ // Test where the buffer gets resized "between elements".
+ {
+ const rab = CreateResizableArrayBuffer(8, 16);
+
+ // Fill the buffer with 1-bits.
+ const taWrite = new Uint8Array(rab);
+ for (let i = 0; i < 8; ++i) {
+ WriteToTypedArray(taWrite, i, 255);
+ }
+
+ let resizeWhenConstructorCalled = false;
+ class MyArray extends Uint16Array {
+ constructor(...params) {
+ super(...params);
+ if (resizeWhenConstructorCalled) {
+ // Resize so that the size is not a multiple of the element size.
+ rab.resize(5);
+ }
+ }
+ };
+
+ const lengthTracking = new MyArray(rab);
+ assertEquals([65535, 65535, 65535, 65535], ToNumbers(lengthTracking));
+ resizeWhenConstructorCalled = true;
+ const a = lengthTracking.slice();
+ assertEquals(5, rab.byteLength);
+ assertEquals(4, a.length); // The old length is used.
+ assertEquals(65535, a[0]);
+ assertEquals(65535, a[1]);
+ assertEquals(0, a[2]);
+ assertEquals(0, a[3]);
+ }
+})();
+
+(function CopyWithin() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // Orig. array: [0, 1, 2, 3]
+ // [0, 1, 2, 3] << fixedLength
+ // [2, 3] << fixedLengthWithOffset
+ // [0, 1, 2, 3, ...] << lengthTracking
+ // [2, 3, ...] << lengthTrackingWithOffset
+
+ fixedLength.copyWithin(0, 2);
+ assertEquals([2, 3, 2, 3], ToNumbers(fixedLength));
+
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ fixedLengthWithOffset.copyWithin(0, 1);
+ assertEquals([3, 3], ToNumbers(fixedLengthWithOffset));
+
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ lengthTracking.copyWithin(0, 2);
+ assertEquals([2, 3, 2, 3], ToNumbers(lengthTracking));
+
+ lengthTrackingWithOffset.copyWithin(0, 1);
+ assertEquals([3, 3], ToNumbers(lengthTrackingWithOffset));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 3; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // Orig. array: [0, 1, 2]
+ // [0, 1, 2, ...] << lengthTracking
+ // [2, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { fixedLength.copyWithin(0, 1); });
+ assertThrows(() => { fixedLengthWithOffset.copyWithin(0, 1); });
+ lengthTracking.copyWithin(0, 1);
+ assertEquals([1, 2, 2], ToNumbers(lengthTracking));
+ lengthTrackingWithOffset.copyWithin(0, 1);
+ assertEquals([2], ToNumbers(lengthTrackingWithOffset));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+ WriteToTypedArray(taWrite, 0, 0);
+
+ assertThrows(() => { fixedLength.copyWithin(0, 1, 1); });
+ assertThrows(() => { fixedLengthWithOffset.copyWithin(0, 1, 1); });
+ lengthTracking.copyWithin(0, 0, 1);
+ assertEquals([0], ToNumbers(lengthTracking));
+ assertThrows(() => { lengthTrackingWithOffset.copyWithin(0, 1, 1); });
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { fixedLength.copyWithin(0, 1, 1); });
+ assertThrows(() => { fixedLengthWithOffset.copyWithin(0, 1, 1); });
+ lengthTracking.copyWithin(0, 0, 1);
+ assertEquals([], ToNumbers(lengthTracking));
+ assertThrows(() => { lengthTrackingWithOffset.copyWithin(0, 1, 1); });
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // Orig. array: [0, 1, 2, 3, 4, 5]
+ // [0, 1, 2, 3] << fixedLength
+ // [2, 3] << fixedLengthWithOffset
+ // [0, 1, 2, 3, 4, 5, ...] << lengthTracking
+ // [2, 3, 4, 5, ...] << lengthTrackingWithOffset
+
+ fixedLength.copyWithin(0, 2);
+ assertEquals([2, 3, 2, 3], ToNumbers(fixedLength));
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ fixedLengthWithOffset.copyWithin(0, 1);
+ assertEquals([3, 3], ToNumbers(fixedLengthWithOffset));
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // [0, 1, 2, 3, 4, 5, ...] << lengthTracking
+ // target ^ ^ start
+ lengthTracking.copyWithin(0, 2);
+ assertEquals([2, 3, 4, 5, 4, 5], ToNumbers(lengthTracking));
+
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, i);
+ }
+
+ // [2, 3, 4, 5, ...] << lengthTrackingWithOffset
+ // target ^ ^ start
+ lengthTrackingWithOffset.copyWithin(0, 1);
+ assertEquals([3, 4, 5, 5], ToNumbers(lengthTrackingWithOffset));
+ }
+})();
+
+(function CopyWithinParameterConversionShrinks() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ const evil = { valueOf: () => { rab.resize(2 * ctor.BYTES_PER_ELEMENT);
+ return 2;}};
+ assertThrows(() => { fixedLength.copyWithin(evil, 0, 1); }, TypeError);
+ rab.resize(4 * ctor.BYTES_PER_ELEMENT);
+ assertThrows(() => { fixedLength.copyWithin(0, evil, 3); }, TypeError);
+ rab.resize(4 * ctor.BYTES_PER_ELEMENT);
+ assertThrows(() => { fixedLength.copyWithin(0, 1, evil); }, TypeError);
+ }
+})();
+
+(function CopyWithinParameterConversionGrows() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const lengthTracking = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ const evil = { valueOf: () => { rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ WriteToTypedArray(lengthTracking, 4, 4);
+ WriteToTypedArray(lengthTracking, 5, 5);
+ return 0;} };
+ // Orig. array: [0, 1, 2, 3] [4, 5]
+ // ^ ^ ^ new elements
+ // target start
+ lengthTracking.copyWithin(evil, 2);
+ assertEquals([2, 3, 2, 3, 4, 5], ToNumbers(lengthTracking));
+
+ rab.resize(4 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(lengthTracking, i, i);
+ }
+
+ // Orig. array: [0, 1, 2, 3] [4, 5]
+ // ^ ^ ^ new elements
+ // start target
+ lengthTracking.copyWithin(2, evil);
+ assertEquals([0, 1, 0, 1, 4, 5], ToNumbers(lengthTracking));
+ }
+})();
+
+(function EntriesKeysValues() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], ToNumbersWithEntries(fixedLength));
+ assertEquals([0, 2, 4, 6], ValuesToNumbers(fixedLength));
+ assertEquals([0, 1, 2, 3], Keys(fixedLength));
+
+ assertEquals([4, 6], ToNumbersWithEntries(fixedLengthWithOffset));
+ assertEquals([4, 6], ValuesToNumbers(fixedLengthWithOffset));
+ assertEquals([0, 1], Keys(fixedLengthWithOffset));
+
+ assertEquals([0, 2, 4, 6], ToNumbersWithEntries(lengthTracking));
+ assertEquals([0, 2, 4, 6], ValuesToNumbers(lengthTracking));
+ assertEquals([0, 1, 2, 3], Keys(lengthTracking));
+
+ assertEquals([4, 6], ToNumbersWithEntries(lengthTrackingWithOffset));
+ assertEquals([4, 6], ValuesToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 1], Keys(lengthTrackingWithOffset));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 2, 4]
+ // [0, 2, 4, ...] << lengthTracking
+ // [4, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { fixedLength.entries(); });
+ assertThrows(() => { fixedLength.values(); });
+ assertThrows(() => { fixedLength.keys(); });
+ assertThrows(() => { fixedLengthWithOffset.entries(); });
+ assertThrows(() => { fixedLengthWithOffset.values(); });
+ assertThrows(() => { fixedLengthWithOffset.keys(); });
+
+ assertEquals([0, 2, 4], ToNumbersWithEntries(lengthTracking));
+ assertEquals([0, 2, 4], ValuesToNumbers(lengthTracking));
+ assertEquals([0, 1, 2], Keys(lengthTracking));
+
+ assertEquals([4], ToNumbersWithEntries(lengthTrackingWithOffset));
+ assertEquals([4], ValuesToNumbers(lengthTrackingWithOffset));
+ assertEquals([0], Keys(lengthTrackingWithOffset));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { fixedLength.entries(); });
+ assertThrows(() => { fixedLength.values(); });
+ assertThrows(() => { fixedLength.keys(); });
+ assertThrows(() => { fixedLengthWithOffset.entries(); });
+ assertThrows(() => { fixedLengthWithOffset.values(); });
+ assertThrows(() => { fixedLengthWithOffset.keys(); });
+ assertThrows(() => { lengthTrackingWithOffset.entries(); });
+ assertThrows(() => { lengthTrackingWithOffset.values(); });
+ assertThrows(() => { lengthTrackingWithOffset.keys(); });
+
+ assertEquals([0], ToNumbersWithEntries(lengthTracking));
+ assertEquals([0], ValuesToNumbers(lengthTracking));
+ assertEquals([0], Keys(lengthTracking));
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { fixedLength.entries(); });
+ assertThrows(() => { fixedLength.values(); });
+ assertThrows(() => { fixedLength.keys(); });
+ assertThrows(() => { fixedLengthWithOffset.entries(); });
+ assertThrows(() => { fixedLengthWithOffset.values(); });
+ assertThrows(() => { fixedLengthWithOffset.keys(); });
+ assertThrows(() => { lengthTrackingWithOffset.entries(); });
+ assertThrows(() => { lengthTrackingWithOffset.values(); });
+ assertThrows(() => { lengthTrackingWithOffset.keys(); });
+
+ assertEquals([], ToNumbersWithEntries(lengthTracking));
+ assertEquals([], ValuesToNumbers(lengthTracking));
+ assertEquals([], Keys(lengthTracking));
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertEquals([0, 2, 4, 6], ToNumbersWithEntries(fixedLength));
+ assertEquals([0, 2, 4, 6], ValuesToNumbers(fixedLength));
+ assertEquals([0, 1, 2, 3], Keys(fixedLength));
+
+ assertEquals([4, 6], ToNumbersWithEntries(fixedLengthWithOffset));
+ assertEquals([4, 6], ValuesToNumbers(fixedLengthWithOffset));
+ assertEquals([0, 1], Keys(fixedLengthWithOffset));
+
+ assertEquals([0, 2, 4, 6, 8, 10], ToNumbersWithEntries(lengthTracking));
+ assertEquals([0, 2, 4, 6, 8, 10], ValuesToNumbers(lengthTracking));
+ assertEquals([0, 1, 2, 3, 4, 5], Keys(lengthTracking));
+
+ assertEquals([4, 6, 8, 10], ToNumbersWithEntries(lengthTrackingWithOffset));
+ assertEquals([4, 6, 8, 10], ValuesToNumbers(lengthTrackingWithOffset));
+ assertEquals([0, 1, 2, 3], Keys(lengthTrackingWithOffset));
+ }
+})();
+
+(function EntriesKeysValuesGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ // Iterating with entries() (the 4 loops below).
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndResize(fixedLength.entries(),
+ [[0, 0], [1, 2], [2, 4], [3, 6]],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndResize(fixedLengthWithOffset.entries(),
+ [[0, 4], [1, 6]],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(lengthTracking.entries(),
+ [[0, 0], [1, 2], [2, 4], [3, 6], [4, 0], [5, 0]],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndResize(lengthTrackingWithOffset.entries(),
+ [[0, 4], [1, 6], [2, 0], [3, 0]],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ // Iterating with keys() (the 4 loops below).
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndResize(fixedLength.keys(),
+ [0, 1, 2, 3],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndResize(fixedLengthWithOffset.keys(),
+ [0, 1],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(lengthTracking.keys(),
+ [0, 1, 2, 3, 4, 5],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndResize(lengthTrackingWithOffset.keys(),
+ [0, 1, 2, 3],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ // Iterating with values() (the 4 loops below).
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndResize(fixedLength.values(),
+ [0, 2, 4, 6],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array is not affected by resizing.
+ TestIterationAndResize(fixedLengthWithOffset.values(),
+ [4, 6],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(lengthTracking.values(),
+ [0, 2, 4, 6, 0, 0],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndResize(lengthTrackingWithOffset.values(),
+ [4, 6, 0, 0],
+ rab, 2, 6 * ctor.BYTES_PER_ELEMENT);
+ }
+})();
+
+(function EntriesKeysValuesShrinkMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ // Iterating with entries() (the 4 loops below).
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ // The fixed length array goes out of bounds when the RAB is resized.
+ assertThrows(() => { TestIterationAndResize(
+ fixedLength.entries(),
+ null,
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT); });
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array goes out of bounds when the RAB is resized.
+ assertThrows(() => { TestIterationAndResize(
+ fixedLengthWithOffset.entries(),
+ null,
+ rab, 1, 3 * ctor.BYTES_PER_ELEMENT); });
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(lengthTracking.entries(),
+ [[0, 0], [1, 2], [2, 4]],
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndResize(lengthTrackingWithOffset.entries(),
+ [[0, 4], [1, 6]],
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ // Iterating with keys() (the 4 loops below).
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ // The fixed length array goes out of bounds when the RAB is resized.
+ assertThrows(() => { TestIterationAndResize(
+ fixedLength.keys(),
+ null,
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT); });
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array goes out of bounds when the RAB is resized.
+ assertThrows(() => { TestIterationAndResize(
+ fixedLengthWithOffset.keys(),
+ null,
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT); });
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(lengthTracking.keys(),
+ [0, 1, 2],
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndResize(lengthTrackingWithOffset.keys(),
+ [0, 1],
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ // Iterating with values() (the 4 loops below).
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+
+ // The fixed length array goes out of bounds when the RAB is resized.
+ assertThrows(() => { TestIterationAndResize(
+ fixedLength.values(),
+ null,
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT); });
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+
+ // The fixed length array goes out of bounds when the RAB is resized.
+ assertThrows(() => { TestIterationAndResize(
+ fixedLengthWithOffset.values(),
+ null,
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT); });
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+
+ TestIterationAndResize(lengthTracking.values(),
+ [0, 2, 4],
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
+ }
+
+ for (let ctor of ctors) {
+ const rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ TestIterationAndResize(lengthTrackingWithOffset.values(),
+ [4, 6],
+ rab, 2, 3 * ctor.BYTES_PER_ELEMENT);
+ }
+})();
+
+(function EverySome() {
+ for (let ctor of ctors) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ const fixedLength = new ctor(rab, 0, 4);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ const lengthTracking = new ctor(rab, 0);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+
+ function div3(n) {
+ return Number(n) % 3 == 0;
+ }
+
+ function even(n) {
+ return Number(n) % 2 == 0;
+ }
+
+ function over10(n) {
+ return Number(n) > 10;
+ }
+
+ assertFalse(fixedLength.every(div3));
+ assertTrue(fixedLength.every(even));
+ assertTrue(fixedLength.some(div3));
+ assertFalse(fixedLength.some(over10));
+
+ assertFalse(fixedLengthWithOffset.every(div3));
+ assertTrue(fixedLengthWithOffset.every(even));
+ assertTrue(fixedLengthWithOffset.some(div3));
+ assertFalse(fixedLengthWithOffset.some(over10));
+
+ assertFalse(lengthTracking.every(div3));
+ assertTrue(lengthTracking.every(even));
+ assertTrue(lengthTracking.some(div3));
+ assertFalse(lengthTracking.some(over10));
+
+ assertFalse(lengthTrackingWithOffset.every(div3));
+ assertTrue(lengthTrackingWithOffset.every(even));
+ assertTrue(lengthTrackingWithOffset.some(div3));
+ assertFalse(lengthTrackingWithOffset.some(over10));
+
+ // Shrink so that fixed length TAs go out of bounds.
+ rab.resize(3 * ctor.BYTES_PER_ELEMENT);
+
+ // Orig. array: [0, 2, 4]
+ // [0, 2, 4, ...] << lengthTracking
+ // [4, ...] << lengthTrackingWithOffset
+
+ assertThrows(() => { fixedLength.every(div3); });
+ assertThrows(() => { fixedLength.some(div3); });
+
+ assertThrows(() => { fixedLengthWithOffset.every(div3); });
+ assertThrows(() => { fixedLengthWithOffset.some(div3); });
+
+ assertFalse(lengthTracking.every(div3));
+ assertTrue(lengthTracking.every(even));
+ assertTrue(lengthTracking.some(div3));
+ assertFalse(lengthTracking.some(over10));
+
+ assertFalse(lengthTrackingWithOffset.every(div3));
+ assertTrue(lengthTrackingWithOffset.every(even));
+ assertFalse(lengthTrackingWithOffset.some(div3));
+ assertFalse(lengthTrackingWithOffset.some(over10));
+
+ // Shrink so that the TAs with offset go out of bounds.
+ rab.resize(1 * ctor.BYTES_PER_ELEMENT);
+
+ assertThrows(() => { fixedLength.every(div3); });
+ assertThrows(() => { fixedLength.some(div3); });
+
+ assertThrows(() => { fixedLengthWithOffset.every(div3); });
+ assertThrows(() => { fixedLengthWithOffset.some(div3); });
+
+ assertTrue(lengthTracking.every(div3));
+ assertTrue(lengthTracking.every(even));
+ assertTrue(lengthTracking.some(div3));
+ assertFalse(lengthTracking.some(over10));
+
+ assertThrows(() => { lengthTrackingWithOffset.every(div3); });
+ assertThrows(() => { lengthTrackingWithOffset.some(div3); });
+
+ // Shrink to zero.
+ rab.resize(0);
+
+ assertThrows(() => { fixedLength.every(div3); });
+ assertThrows(() => { fixedLength.some(div3); });
+
+ assertThrows(() => { fixedLengthWithOffset.every(div3); });
+ assertThrows(() => { fixedLengthWithOffset.some(div3); });
+
+ assertTrue(lengthTracking.every(div3));
+ assertTrue(lengthTracking.every(even));
+ assertFalse(lengthTracking.some(div3));
+ assertFalse(lengthTracking.some(over10));
+
+ assertThrows(() => { lengthTrackingWithOffset.every(div3); });
+ assertThrows(() => { lengthTrackingWithOffset.some(div3); });
+
+ // Grow so that all TAs are back in-bounds.
+ rab.resize(6 * ctor.BYTES_PER_ELEMENT);
+ for (let i = 0; i < 6; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+
+ // Orig. array: [0, 2, 4, 6, 8, 10]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, 8, 10, ...] << lengthTracking
+ // [4, 6, 8, 10, ...] << lengthTrackingWithOffset
+
+ assertFalse(fixedLength.every(div3));
+ assertTrue(fixedLength.every(even));
+ assertTrue(fixedLength.some(div3));
+ assertFalse(fixedLength.some(over10));
+
+ assertFalse(fixedLengthWithOffset.every(div3));
+ assertTrue(fixedLengthWithOffset.every(even));
+ assertTrue(fixedLengthWithOffset.some(div3));
+ assertFalse(fixedLengthWithOffset.some(over10));
+
+ assertFalse(lengthTracking.every(div3));
+ assertTrue(lengthTracking.every(even));
+ assertTrue(lengthTracking.some(div3));
+ assertFalse(lengthTracking.some(over10));
+
+ assertFalse(lengthTrackingWithOffset.every(div3));
+ assertTrue(lengthTrackingWithOffset.every(even));
+ assertTrue(lengthTrackingWithOffset.some(div3));
+ assertFalse(lengthTrackingWithOffset.some(over10));
+ }
+})();
+
+(function EveryShrinkMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let resizeAfter;
+ let resizeTo;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == resizeAfter) {
+ rab.resize(resizeTo);
+ }
+ return true;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(fixedLength.every(myFunc));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(fixedLengthWithOffset.every(myFunc));
+ assertEquals([4, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(lengthTracking.every(myFunc));
+ assertEquals([0, 2, 4, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(lengthTrackingWithOffset.every(myFunc));
+ assertEquals([4, undefined], values);
+ }
+})();
+
+(function EveryGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let resizeAfter;
+ let resizeTo;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == resizeAfter) {
+ rab.resize(resizeTo);
+ }
+ return true;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(fixedLength.every(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(fixedLengthWithOffset.every(myFunc));
+ assertEquals([4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(lengthTracking.every(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertTrue(lengthTrackingWithOffset.every(myFunc));
+ assertEquals([4, 6], values);
+ }
+})();
+
+(function SomeShrinkMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let resizeAfter;
+ let resizeTo;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == resizeAfter) {
+ rab.resize(resizeTo);
+ }
+ return false;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(fixedLength.some(myFunc));
+ assertEquals([0, 2, undefined, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(fixedLengthWithOffset.some(myFunc));
+ assertEquals([4, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(lengthTracking.some(myFunc));
+ assertEquals([0, 2, 4, undefined], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 3 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(lengthTrackingWithOffset.some(myFunc));
+ assertEquals([4, undefined], values);
+ }
+})();
+
+(function SomeGrowMidIteration() {
+ // Orig. array: [0, 2, 4, 6]
+ // [0, 2, 4, 6] << fixedLength
+ // [4, 6] << fixedLengthWithOffset
+ // [0, 2, 4, 6, ...] << lengthTracking
+ // [4, 6, ...] << lengthTrackingWithOffset
+ function CreateRabForTest(ctor) {
+ const rab = CreateResizableArrayBuffer(4 * ctor.BYTES_PER_ELEMENT,
+ 8 * ctor.BYTES_PER_ELEMENT);
+ // Write some data into the array.
+ const taWrite = new ctor(rab);
+ for (let i = 0; i < 4; ++i) {
+ WriteToTypedArray(taWrite, i, 2 * i);
+ }
+ return rab;
+ }
+
+ let values;
+ let rab;
+ let resizeAfter;
+ let resizeTo;
+ function myFunc(n) {
+ if (n == undefined) {
+ values.push(n);
+ } else {
+ values.push(Number(n));
+ }
+ if (values.length == resizeAfter) {
+ rab.resize(resizeTo);
+ }
+ return false;
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLength = new ctor(rab, 0, 4);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(fixedLength.some(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const fixedLengthWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT, 2);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(fixedLengthWithOffset.some(myFunc));
+ assertEquals([4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTracking = new ctor(rab, 0);
+ values = [];
+ resizeAfter = 2;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(lengthTracking.some(myFunc));
+ assertEquals([0, 2, 4, 6], values);
+ }
+
+ for (let ctor of ctors) {
+ rab = CreateRabForTest(ctor);
+ const lengthTrackingWithOffset = new ctor(rab, 2 * ctor.BYTES_PER_ELEMENT);
+ values = [];
+ resizeAfter = 1;
+ resizeTo = 5 * ctor.BYTES_PER_ELEMENT;
+ assertFalse(lengthTrackingWithOffset.some(myFunc));
+ assertEquals([4, 6], values);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js b/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
index 0018f15659..eec9716b51 100644
--- a/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
+++ b/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
@@ -32,11 +32,11 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
...wasmI32Const(array_length),
kGCPrefix, kExprRttCanon, array_index,
- kGCPrefix, kExprArrayNewDefault, array_index,
+ kGCPrefix, kExprArrayNewDefaultWithRtt, array_index,
kExprGlobalSet, from.index,
...wasmI32Const(array_length),
kGCPrefix, kExprRttCanon, array_index,
- kGCPrefix, kExprArrayNewDefault, array_index,
+ kGCPrefix, kExprArrayNewDefaultWithRtt, array_index,
kExprGlobalSet, to.index
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/externref-table.js b/deps/v8/test/mjsunit/wasm/externref-table.js
index 0cfb656cb1..4b9463781b 100644
--- a/deps/v8/test/mjsunit/wasm/externref-table.js
+++ b/deps/v8/test/mjsunit/wasm/externref-table.js
@@ -10,8 +10,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let table = new WebAssembly.Table({element: "externref", initial: 10});
- // Table should be initialized with null.
- assertEquals(null, table.get(1));
+ // Table should be initialized with undefined.
+ assertEquals(undefined, table.get(1));
let obj = {'hello' : 'world'};
table.set(2, obj);
assertSame(obj, table.get(2));
@@ -92,18 +92,19 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(table.get(2), testObject);
})();
+function getDummy(val) {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('dummy', kSig_i_v)
+ .addBody([kExprI32Const, val])
+ .exportAs('dummy');
+ return builder.instantiate().exports.dummy;
+}
+
(function TestFuncRefTableConstructorWithDefaultValue() {
print(arguments.callee.name);
const expected = 6;
- let dummy =
- (() => {
- let builder = new WasmModuleBuilder();
- builder.addFunction('dummy', kSig_i_v)
- .addBody([kExprI32Const, expected])
- .exportAs('dummy');
- return builder.instantiate().exports.dummy;
- })();
+ let dummy = getDummy(expected);
const argument = { "element": "anyfunc", "initial": 3 };
const table = new WebAssembly.Table(argument, dummy);
@@ -112,3 +113,26 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(table.get(1)(), expected);
assertEquals(table.get(2)(), expected);
})();
+
+(function TestExternFuncTableSetWithoutValue() {
+ print(arguments.callee.name);
+
+ const expected = 6;
+ const dummy = getDummy(expected);
+ const argument = { "element": "anyfunc", "initial": 3 };
+ const table = new WebAssembly.Table(argument, dummy);
+ assertEquals(table.get(1)(), expected);
+ table.set(1);
+ assertEquals(table.get(1), null);
+})();
+
+(function TestExternRefTableSetWithoutValue() {
+ print(arguments.callee.name);
+
+ const testObject = {};
+ const argument = { "element": "externref", "initial": 3 };
+ const table = new WebAssembly.Table(argument, testObject);
+ assertEquals(table.get(1), testObject);
+ table.set(1);
+ assertEquals(table.get(1), undefined);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-nominal.js b/deps/v8/test/mjsunit/wasm/gc-nominal.js
index a58a51d732..8b371fc84f 100644
--- a/deps/v8/test/mjsunit/wasm/gc-nominal.js
+++ b/deps/v8/test/mjsunit/wasm/gc-nominal.js
@@ -6,26 +6,49 @@
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
-var builder = new WasmModuleBuilder();
-let struct1 = builder.addStruct([makeField(kWasmI32, true)]);
-let struct2 = builder.addStructExtending(
- [makeField(kWasmI32, true), makeField(kWasmI32, true)], struct1);
+(function() {
+ var builder = new WasmModuleBuilder();
+ let struct1 = builder.addStructSubtype([makeField(kWasmI32, true)]);
+ let struct2 = builder.addStructSubtype(
+ [makeField(kWasmI32, true), makeField(kWasmI32, true)], struct1);
-let array1 = builder.addArray(kWasmI32, true);
-let array2 = builder.addArrayExtending(kWasmI32, true, array1);
+ let array1 = builder.addArraySubtype(kWasmI32, true);
+ let array2 = builder.addArraySubtype(kWasmI32, true, array1);
-builder.addFunction("main", kSig_v_v)
- .addLocals(wasmOptRefType(struct1), 1)
- .addLocals(wasmOptRefType(array1), 1)
- .addBody([
- kGCPrefix, kExprRttCanon, struct2,
- kGCPrefix, kExprStructNewDefault, struct2,
+ builder.addFunction("main", kSig_v_v)
+ .addLocals(wasmOptRefType(struct1), 1)
+ .addLocals(wasmOptRefType(array1), 1)
+ .addBody([
+ // Check that we can create a struct with explicit RTT...
+ kGCPrefix, kExprRttCanon, struct2, kGCPrefix,
+ kExprStructNewDefaultWithRtt, struct2,
+ // ...and upcast it.
kExprLocalSet, 0,
+ // Check that we can create a struct with implicit RTT.
+ kGCPrefix, kExprStructNewDefault, struct2, kExprLocalSet, 0,
+ // Check that we can create an array with explicit RTT...
kExprI32Const, 10, // length
- kGCPrefix, kExprRttCanon, array2,
- kGCPrefix, kExprArrayNewDefault, array2,
- kExprLocalSet, 1
- ]);
+ kGCPrefix, kExprRttCanon, array2, kGCPrefix,
+ kExprArrayNewDefaultWithRtt, array2,
+ // ...and upcast it.
+ kExprLocalSet, 1,
+ // Check that we can create an array with implicit RTT.
+ kExprI32Const, 10, // length
+ kGCPrefix, kExprArrayNewDefault, array2, kExprLocalSet, 1
+ ])
+ .exportFunc();
+
+ // This test is only interested in type checking.
+ builder.instantiate();
+})();
-// This test is only interested in type checking.
-builder.instantiate();
+(function () {
+ let builder = new WasmModuleBuilder();
+ let t0 = builder.addStructSubtype([]);
+ for (let i = 0; i < 32; i++) {
+ builder.addStructSubtype([], i);
+ }
+ assertThrows(
+ () => builder.instantiate(), WebAssembly.CompileError,
+ /subtyping depth is greater than allowed/);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/inlining.js b/deps/v8/test/mjsunit/wasm/inlining.js
index 3fd5179b32..bf75673ec6 100644
--- a/deps/v8/test/mjsunit/wasm/inlining.js
+++ b/deps/v8/test/mjsunit/wasm/inlining.js
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --wasm-inlining --no-liftoff
+// Flags: --wasm-inlining --no-liftoff --experimental-wasm-return-call
+// Flags: --experimental-wasm-typed-funcref
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// TODO(12166): Consider running tests with --trace-wasm and inspecting their
-// output.
+// output, or implementing testing infrastructure with --allow-natives-syntax.
(function SimpleInliningTest() {
let builder = new WasmModuleBuilder();
@@ -22,7 +23,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.exportAs("main");
let instance = builder.instantiate();
- assertEquals(instance.exports.main(10), 14);
+ assertEquals(14, instance.exports.main(10));
})();
(function MultiReturnTest() {
@@ -38,7 +39,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.exportAs("main");
let instance = builder.instantiate();
- assertEquals(instance.exports.main(10), 9 * 11);
+ assertEquals(9 * 11, instance.exports.main(10));
})();
(function NoReturnTest() {
@@ -55,7 +56,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.exportAs("main");
let instance = builder.instantiate();
- assertEquals(instance.exports.main(10), 10);
+ assertEquals(10, instance.exports.main(10));
})();
(function InfiniteLoopTest() {
@@ -75,3 +76,280 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
builder.instantiate();
})();
+
+(function TailCallInCalleeTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x) = g(x - 1)
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub,
+ kExprReturnCall, 1]);
+ // g(x) = x * 2
+ builder.addFunction("inner_callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 2, kExprI32Mul]);
+ // h(x) = f(x) + 5
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprCallFunction, callee.index,
+ kExprI32Const, 5, kExprI32Add])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(23, instance.exports.main(10));
+})();
+
+(function MultipleCallAndReturnSitesTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x) = x >= 0 ? x - 1 : x + 1
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 0, kExprI32GeS,
+ kExprIf, kWasmI32,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub,
+ kExprElse,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
+ kExprEnd]);
+ // g(x) = f(x) * f(-x)
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprCallFunction, callee.index,
+ kExprI32Const, 0, kExprLocalGet, 0, kExprI32Sub,
+ kExprCallFunction, callee.index,
+ kExprI32Mul])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(-81, instance.exports.main(10));
+})();
+
+(function TailCallInCallerTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x) = x >= 0 ? g(x) + 1 : g(x - 1)
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 0, kExprI32GeS,
+ kExprIf, kWasmI32,
+ kExprLocalGet, 0, kExprCallFunction, 1, kExprI32Const, 1,
+ kExprI32Add,
+ kExprElse,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub,
+ kExprReturnCall, 1,
+ kExprEnd]);
+ // g(x) = x * 2
+ builder.addFunction("inner_callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 2, kExprI32Mul]);
+ // h(x) = f(x + 5)
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 5, kExprI32Add,
+ kExprReturnCall, callee.index])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(31, instance.exports.main(10));
+ assertEquals(-12, instance.exports.main(-10));
+})();
+
+(function HandledInHandledTest() {
+ let builder = new WasmModuleBuilder();
+ let tag = builder.addTag(kSig_v_i);
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprTry, kWasmI32,
+ kExprI32Const, 42,
+ kExprThrow, tag,
+ kExprCatchAll,
+ kExprLocalGet, 0,
+ kExprEnd]);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([kExprTry, kWasmI32,
+ kExprLocalGet, 0,
+ kExprCallFunction, callee.index,
+ kExprCatchAll,
+ kExprLocalGet, 1,
+ kExprEnd])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(10, instance.exports.main(10, 20));
+})();
+
+(function HandledInUnhandledTest() {
+ let builder = new WasmModuleBuilder();
+ let tag = builder.addTag(kSig_v_i);
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprTry, kWasmI32,
+ kExprI32Const, 42,
+ kExprThrow, tag,
+ kExprCatchAll,
+ kExprLocalGet, 0,
+ kExprEnd]);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([kExprLocalGet, 0,
+ kExprCallFunction, callee.index])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(10, instance.exports.main(10, 20));
+})();
+
+(function UnhandledInUnhandledTest() {
+ let builder = new WasmModuleBuilder();
+ let tag = builder.addTag(kSig_v_i);
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprI32Const, 42, kExprThrow, tag]);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([kExprLocalGet, 0,
+ kExprCallFunction, callee.index])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertThrows(() => instance.exports.main(10, 20), WebAssembly.Exception);
+})();
+
+// This is the most interesting of the exception tests, as it requires rewiring
+// the unhandled calls in the callee (including the 'throw' builtin) to the
+// handler in the caller.
+(function UnhandledInHandledTest() {
+ let builder = new WasmModuleBuilder();
+ let tag = builder.addTag(kSig_v_i);
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0,
+ kExprIf, kWasmI32,
+ kExprLocalGet, 0, kExprThrow, tag,
+ kExprElse,
+ kExprCallFunction, 1,
+ kExprEnd]);
+
+ builder.addFunction("unreachable", kSig_i_v)
+ .addBody([kExprUnreachable]);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([kExprTry, kWasmI32,
+ kExprLocalGet, 0,
+ kExprCallFunction, callee.index,
+ kExprCatchAll,
+ kExprLocalGet, 1,
+ kExprEnd])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(20, instance.exports.main(10, 20));
+})();
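+
+// As an illustrative JS-level sketch (not the actual compiler lowering), the
+// behavior checked above is roughly equivalent to:
+//   function callee(x) { if (x) throw x; return unreachableHelper(); }
+//   function main(x, y) { try { return callee(x); } catch (e) { return y; } }
+// where unreachableHelper is a hypothetical stand-in for the "unreachable"
+// function, so main(10, 20) must return 20 even though callee declares no
+// handler of its own.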
+
+(function CallRefSpecSucceededTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x) = x - 1
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub]);
+
+ let global = builder.addGlobal(wasmRefType(0), false,
+ WasmInitExpr.RefFunc(callee.index));
+
+ // g(x) = f(5) + x
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprI32Const, 5, kExprGlobalGet, global.index, kExprCallRef,
+ kExprLocalGet, 0, kExprI32Add])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(14, instance.exports.main(10));
+})();
+
+(function CallRefSpecFailedTest() {
+ let builder = new WasmModuleBuilder();
+
+ // h(x) = x - 1
+ builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub]);
+
+ // f(x) = x - 2
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub]);
+
+ let global = builder.addGlobal(wasmRefType(1), false,
+ WasmInitExpr.RefFunc(callee.index));
+
+ // g(x) = f(5) + x
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprI32Const, 5, kExprGlobalGet, global.index, kExprCallRef,
+ kExprLocalGet, 0, kExprI32Add])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(13, instance.exports.main(10));
+})();
+
+(function CallReturnRefSpecSucceededTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x) = x - 1
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub]);
+
+ let global = builder.addGlobal(wasmRefType(0), false,
+ WasmInitExpr.RefFunc(callee.index));
+
+ // g(x) = f(5 + x)
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprI32Const, 5, kExprLocalGet, 0, kExprI32Add,
+ kExprGlobalGet, global.index, kExprReturnCallRef])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(14, instance.exports.main(10));
+})();
+
+(function CallReturnRefSpecFailedTest() {
+ let builder = new WasmModuleBuilder();
+
+ // h(x) = x - 1
+ builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub]);
+
+ // f(x) = x - 2
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 2, kExprI32Sub]);
+
+ let global = builder.addGlobal(wasmRefType(1), false,
+ WasmInitExpr.RefFunc(callee.index));
+
+ // g(x) = f(5 + x)
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprI32Const, 5, kExprLocalGet, 0, kExprI32Add,
+ kExprGlobalGet, global.index, kExprReturnCallRef])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(13, instance.exports.main(10));
+})();
+
+// Tests that no LoopExits are emitted in the inlined function.
+(function LoopUnrollingTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x, y) = { do { y += 1; x -= 1; } while (x > 0); return y; }
+ let callee = builder.addFunction("callee", kSig_i_ii)
+ .addBody([
+ kExprLoop, kWasmVoid,
+ kExprLocalGet, 1, kExprI32Const, 1, kExprI32Add, kExprLocalSet, 1,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub, kExprLocalSet, 0,
+ kExprLocalGet, 0, kExprI32Const, 0, kExprI32GtS, kExprBrIf, 0,
+ kExprEnd,
+ kExprLocalGet, 1
+ ]);
+ // g(x) = f(5, x) + x
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprI32Const, 5, kExprLocalGet, 0,
+ kExprCallFunction, callee.index,
+ kExprLocalGet, 0, kExprI32Add])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(25, instance.exports.main(10));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index 64216fffb7..f9b231242c 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -698,8 +698,6 @@ assertThrows(
assertThrows(
() => set.call({}), TypeError, /Receiver is not a WebAssembly.Table/);
assertThrows(
- () => set.call(tbl1, 0), TypeError, /must be null or a WebAssembly function/);
-assertThrows(
() => set.call(tbl1, undefined), TypeError,
/must be convertible to a valid number/);
assertThrows(
@@ -765,7 +763,7 @@ assertThrows(
() => tbl.grow(-Infinity), TypeError, /must be convertible to a valid number/);
assertEq(tbl.grow(0), 1);
assertEq(tbl.length, 1);
-assertEq(tbl.grow(1, 4), 1);
+assertEq(tbl.grow(1, null, 4), 1);
assertEq(tbl.length, 2);
assertEq(tbl.length, 2);
assertThrows(() => tbl.grow(1), Error, /failed to grow table by \d+/);
diff --git a/deps/v8/test/mjsunit/wasm/load-elimination.js b/deps/v8/test/mjsunit/wasm/load-elimination.js
index 8ca04ed040..b5ecc2eb40 100644
--- a/deps/v8/test/mjsunit/wasm/load-elimination.js
+++ b/deps/v8/test/mjsunit/wasm/load-elimination.js
@@ -182,7 +182,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([
kExprI32Const, 5,
kGCPrefix, kExprRttCanon, array,
- kGCPrefix, kExprArrayNewDefault, array,
+ kGCPrefix, kExprArrayNewDefaultWithRtt, array,
kExprLocalSet, 1,
kExprLocalGet, 1, // a[i] = i for i = {0..4}
@@ -308,7 +308,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
builder.addFunction("main", kSig_i_i)
.addBody([
kExprI32Const, 10, kGCPrefix, kExprRttCanon, array,
- kGCPrefix, kExprArrayNewDefault, array,
+ kGCPrefix, kExprArrayNewDefaultWithRtt, array,
kExprI32Const, 7,
kExprCallFunction, tester.index,
])
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index 7420a9cd2a..0db0e8bf0d 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -166,29 +166,3 @@ function testOOBThrows() {
}
testOOBThrows();
-
-function testAddressSpaceLimit() {
- // 1TiB + 4 GiB, see wasm-memory.h
- const kMaxAddressSpace = 1 * 1024 * 1024 * 1024 * 1024
- + 4 * 1024 * 1024 * 1024;
- const kAddressSpacePerMemory = 10 * 1024 * 1024 * 1024;
-
- let last_memory;
- try {
- let memories = [];
- let address_space = 0;
- while (address_space <= kMaxAddressSpace + 1) {
- last_memory = new WebAssembly.Memory({initial: 1})
- memories.push(last_memory);
- address_space += kAddressSpacePerMemory;
- }
- } catch (e) {
- assertTrue(e instanceof RangeError);
- return;
- }
- assertUnreachable("should have reached the address space limit");
-}
-
-if(%IsWasmTrapHandlerEnabled()) {
- testAddressSpaceLimit();
-}
diff --git a/deps/v8/test/mjsunit/wasm/mutable-globals.js b/deps/v8/test/mjsunit/wasm/mutable-globals.js
index e16d318d84..80d3f3515d 100644
--- a/deps/v8/test/mjsunit/wasm/mutable-globals.js
+++ b/deps/v8/test/mjsunit/wasm/mutable-globals.js
@@ -21,12 +21,8 @@ function assertGlobalIsValid(global) {
assertThrows(() => new WebAssembly.Global({}), TypeError);
assertThrows(() => new WebAssembly.Global({value: 'foo'}), TypeError);
assertThrows(() => new WebAssembly.Global({value: 'i128'}), TypeError);
- // Without --experimental-wasm-reftypes, globals of type {externref} and {anyfunc}
- // are not allowed.
- assertThrows(() => new WebAssembly.Global({value: 'externref'}), TypeError);
- assertThrows(() => new WebAssembly.Global({value: 'anyfunc'}), TypeError);
- for (let type of ['i32', 'f32', 'f64', 'i64']) {
+ for (let type of ['i32', 'f32', 'f64', 'i64', 'externref', 'anyfunc']) {
assertGlobalIsValid(new WebAssembly.Global({value: type}));
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/prototype.js b/deps/v8/test/mjsunit/wasm/prototype.js
new file mode 100644
index 0000000000..c8f06bff72
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/prototype.js
@@ -0,0 +1,42 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+let emptyModuleBinary = new WasmModuleBuilder().toBuffer();
+
+(function ModulePrototype() {
+ class _Module extends WebAssembly.Module {}
+ let module = new _Module(emptyModuleBinary);
+ assertInstanceof(module, _Module);
+ assertInstanceof(module, WebAssembly.Module);
+})();
+
+(function InstancePrototype() {
+ class _Instance extends WebAssembly.Instance {}
+ let instance = new _Instance(new WebAssembly.Module(emptyModuleBinary));
+ assertInstanceof(instance, _Instance);
+ assertInstanceof(instance, WebAssembly.Instance);
+})();
+
+(function TablePrototype() {
+ class _Table extends WebAssembly.Table {}
+ let table = new _Table({initial: 0, element: "anyfunc"});
+ assertInstanceof(table, _Table);
+ assertInstanceof(table, WebAssembly.Table);
+})();
+
+(function MemoryPrototype() {
+ class _Memory extends WebAssembly.Memory {}
+ let memory = new _Memory({initial: 0, maximum: 1});
+ assertInstanceof(memory, _Memory);
+ assertInstanceof(memory, WebAssembly.Memory);
+})();
+
+(function GlobalPrototype() {
+ class _Global extends WebAssembly.Global {}
+ let global = new _Global({value: 'i32', mutable: false}, 0);
+ assertInstanceof(global, _Global);
+ assertInstanceof(global, WebAssembly.Global);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/reference-globals.js b/deps/v8/test/mjsunit/wasm/reference-globals.js
index 76d41f8f97..6ab071f9fa 100644
--- a/deps/v8/test/mjsunit/wasm/reference-globals.js
+++ b/deps/v8/test/mjsunit/wasm/reference-globals.js
@@ -113,7 +113,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
var struct_index = builder.addStruct([{type: kWasmI32, mutability: false}]);
var composite_struct_index = builder.addStruct(
[{type: kWasmI32, mutability: false},
- {type: wasmRefType(struct_index), mutability: false},
+ {type: wasmOptRefType(struct_index), mutability: false},
{type: kWasmI8, mutability: true}]);
let field1_value = 432;
@@ -136,6 +136,12 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
WasmInitExpr.I32Const(field3_value),
WasmInitExpr.RttCanon(composite_struct_index)]));
+ var global_default = builder.addGlobal(
+ wasmRefType(composite_struct_index), false,
+ WasmInitExpr.StructNewDefaultWithRtt(
+ composite_struct_index,
+ WasmInitExpr.RttCanon(composite_struct_index)));
+
builder.addFunction("field_1", kSig_i_v)
.addBody([
kExprGlobalGet, global.index,
@@ -156,11 +162,33 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
kExprGlobalGet, global.index,
kGCPrefix, kExprStructGetS, composite_struct_index, 2])
.exportFunc();
+
+ builder.addFunction("field_1_default", kSig_i_v)
+ .addBody([
+ kExprGlobalGet, global_default.index,
+ kGCPrefix, kExprStructGet, composite_struct_index, 0])
+ .exportFunc();
+
+ builder.addFunction("field_2_default", makeSig([], [kWasmAnyRef]))
+ .addBody([
+ kExprGlobalGet, global_default.index,
+ kGCPrefix, kExprStructGet, composite_struct_index, 1])
+ .exportFunc();
+
+ builder.addFunction("field_3_default", kSig_i_v)
+ .addBody([
+ kExprGlobalGet, global_default.index,
+ kGCPrefix, kExprStructGetS, composite_struct_index, 2])
+ .exportFunc();
+
var instance = builder.instantiate({});
assertEquals(field1_value, instance.exports.field_1());
assertEquals(field2_value, instance.exports.field_2());
assertEquals((field3_value << 24) >> 24, instance.exports.field_3());
+ assertEquals(0, instance.exports.field_1_default());
+ assertEquals(null, instance.exports.field_2_default());
+ assertEquals(0, instance.exports.field_3_default());
})();
(function TestArrayInitExprNumeric() {
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js b/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
index 1dbbcb9ff6..e46d226b48 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
@@ -34,4 +34,5 @@ function RunSomeAllocs(total, retained, pages, max = pages) {
RunSomeAllocs(10, 1, 1, 1);
RunSomeAllocs(100, 3, 1, 1);
RunSomeAllocs(1000, 10, 1, 1);
-RunSomeAllocs(10000, 20, 1, 1);
+// TODO(12278): Make this faster (by collecting memories earlier?) and reenable.
+// RunSomeAllocs(10000, 20, 1, 1);
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js
index d970126f11..0c46b64ee7 100644
--- a/deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js
@@ -6,8 +6,6 @@
d8.file.execute("test/mjsunit/worker-ping-test.js");
-let kDisabledAbort = false; // TODO(9380): enable abort for this test
-
let config = {
numThings: 4, // size of circular buffer
numWorkers: 4, // number of workers
@@ -16,7 +14,7 @@ let config = {
traceScript: false, // print the script
traceAlloc: true, // print each allocation attempt
traceIteration: 10, // print diagnostics every so many iterations
- abortOnFail: kDisabledAbort, // kill worker if allocation fails
+ abortOnFail: true, // kill worker if allocation fails
AllocThing: function AllocThing(id) {
let pages = 1, max = 1;
diff --git a/deps/v8/test/mjsunit/wasm/table-fill.js b/deps/v8/test/mjsunit/wasm/table-fill.js
index 78b13f1706..97e874189b 100644
--- a/deps/v8/test/mjsunit/wasm/table-fill.js
+++ b/deps/v8/test/mjsunit/wasm/table-fill.js
@@ -74,7 +74,7 @@ function checkExternRefTable(getter, start, count, value) {
(function testExternRefTableIsUninitialized() {
print(arguments.callee.name);
- checkExternRefTable(instance.exports[`get${import_ref}`], 0, size, null);
+ checkExternRefTable(instance.exports[`get${import_ref}`], 0, size, undefined);
checkExternRefTable(instance.exports[`get${internal_ref}`], 0, size, null);
})();
@@ -102,7 +102,7 @@ function checkExternRefTable(getter, start, count, value) {
kTrapTableOutOfBounds,
() => instance.exports[`fill${import_ref}`](start, value, count));
checkExternRefTable(
- instance.exports[`get${import_ref}`], start, size - start, null);
+ instance.exports[`get${import_ref}`], start, size - start, undefined);
value = 45;
assertTraps(
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
index fad6825fbc..25ed6eb1c4 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
@@ -130,7 +130,7 @@ testGrowInternalAnyFuncTable(9);
const table = new WebAssembly.Table({element: "externref", initial: size});
const instance = builder.instantiate({imp: {table: table}});
- assertEquals(null, table.get(size - 2));
+ assertEquals(undefined, table.get(size - 2));
function growAndCheck(element, grow_by) {
assertEquals(size, instance.exports.size());
diff --git a/deps/v8/test/mjsunit/wasm/table.js b/deps/v8/test/mjsunit/wasm/table.js
index ef4eddaf9d..0175169f17 100644
--- a/deps/v8/test/mjsunit/wasm/table.js
+++ b/deps/v8/test/mjsunit/wasm/table.js
@@ -196,7 +196,6 @@ function assertTableIsValid(table, length) {
assertThrows(() => table.set(key, f), RangeError);
}
- assertThrows(() => table.set(0), TypeError);
for (let val of [undefined, 0, "", {}, [], () => {}]) {
assertThrows(() => table.set(0, val), TypeError);
}
@@ -285,3 +284,18 @@ function assertTableIsValid(table, length) {
table.grow({valueOf: () => {table.grow(2); return 1;}});
assertEquals(3, table.length);
})();
+
+(function TestGrowWithInit() {
+ function getDummy(val) {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('dummy', kSig_i_v)
+ .addBody([kExprI32Const, val])
+ .exportAs('dummy');
+ return builder.instantiate().exports.dummy;
+ }
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 1});
+ table.grow(5, getDummy(24));
+ for (let i = 1; i <= 5; ++i) {
+ assertEquals(24, table.get(i)());
+ }
+})();
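+
+// Illustrative sketch (not part of the upstream test): the two-argument grow
+// also applies to externref tables, where the second argument seeds the new
+// entries, e.g.
+//   let t = new WebAssembly.Table({element: "externref", initial: 0});
+//   t.grow(3, "filler");  // t.get(0) through t.get(2) are now "filler"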
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js
index 8b35c1cd0b..8f1a5a3f7c 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection-with-externref.js
@@ -8,13 +8,13 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
(function TestTableType() {
let table = new WebAssembly.Table({initial: 1, element: "externref"});
- let type = WebAssembly.Table.type(table);
+ let type = table.type();
assertEquals(1, type.minimum);
assertEquals("externref", type.element);
assertEquals(2, Object.getOwnPropertyNames(type).length);
table = new WebAssembly.Table({initial: 2, maximum: 15, element: "externref"});
- type = WebAssembly.Table.type(table);
+ type = table.type();
assertEquals(2, type.minimum);
assertEquals(15, type.maximum);
assertEquals("externref", type.element);
@@ -23,19 +23,19 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
(function TestGlobalType() {
let global = new WebAssembly.Global({value: "externref", mutable: true});
- let type = WebAssembly.Global.type(global);
+ let type = global.type();
assertEquals("externref", type.value);
assertEquals(true, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
global = new WebAssembly.Global({value: "externref"});
- type = WebAssembly.Global.type(global);
+ type = global.type();
assertEquals("externref", type.value);
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
global = new WebAssembly.Global({value: "anyfunc"});
- type = WebAssembly.Global.type(global);
+ type = global.type();
assertEquals("anyfunc", type.value);
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection.js b/deps/v8/test/mjsunit/wasm/type-reflection.js
index 4f2638ec7f..f88cf15136 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection.js
@@ -6,55 +6,19 @@
d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
-(function TestInvalidArgumentToType() {
- ["abc", 123, {}, _ => 0].forEach(function(invalidInput) {
- assertThrows(
- () => WebAssembly.Memory.type(invalidInput), TypeError,
- "WebAssembly.Memory.type(): Argument 0 must be a WebAssembly.Memory");
- assertThrows(
- () => WebAssembly.Table.type(invalidInput), TypeError,
- "WebAssembly.Table.type(): Argument 0 must be a WebAssembly.Table");
- assertThrows(
- () => WebAssembly.Global.type(invalidInput), TypeError,
- "WebAssembly.Global.type(): Argument 0 must be a WebAssembly.Global");
- assertThrows(
- () => WebAssembly.Function.type(invalidInput), TypeError,
- "WebAssembly.Function.type(): Argument 0 must be a WebAssembly.Function");
- });
-
- assertThrows(
- () => WebAssembly.Memory.type(
- new WebAssembly.Table({initial:1, element: "anyfunc"})),
- TypeError,
- "WebAssembly.Memory.type(): Argument 0 must be a WebAssembly.Memory");
-
- assertThrows(
- () => WebAssembly.Table.type(
- new WebAssembly.Memory({initial:1})), TypeError,
- "WebAssembly.Table.type(): Argument 0 must be a WebAssembly.Table");
-
- assertThrows(
- () => WebAssembly.Global.type(
- new WebAssembly.Memory({initial:1})), TypeError,
- "WebAssembly.Global.type(): Argument 0 must be a WebAssembly.Global");
-
- assertThrows(
- () => WebAssembly.Function.type(
- new WebAssembly.Memory({initial:1})), TypeError,
- "WebAssembly.Function.type(): Argument 0 must be a WebAssembly.Function");
-})();
-
(function TestMemoryType() {
let mem = new WebAssembly.Memory({initial: 1});
- let type = WebAssembly.Memory.type(mem);
+ let type = mem.type();
assertEquals(1, type.minimum);
- assertEquals(1, Object.getOwnPropertyNames(type).length);
+ assertEquals(false, type.shared);
+ assertEquals(2, Object.getOwnPropertyNames(type).length);
mem = new WebAssembly.Memory({initial: 2, maximum: 15});
- type = WebAssembly.Memory.type(mem);
+ type = mem.type();
assertEquals(2, type.minimum);
assertEquals(15, type.maximum);
- assertEquals(2, Object.getOwnPropertyNames(type).length);
+ assertEquals(false, type.shared);
+ assertEquals(3, Object.getOwnPropertyNames(type).length);
})();
(function TestMemoryExports() {
@@ -105,14 +69,14 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
(function TestTableType() {
let table = new WebAssembly.Table({initial: 1, element: "anyfunc"});
- let type = WebAssembly.Table.type(table);
+ let type = table.type();
assertEquals(1, type.minimum);
assertEquals("anyfunc", type.element);
assertEquals(undefined, type.maximum);
assertEquals(2, Object.getOwnPropertyNames(type).length);
table = new WebAssembly.Table({initial: 2, maximum: 15, element: "anyfunc"});
- type = WebAssembly.Table.type(table);
+ type = table.type();
assertEquals(2, type.minimum);
assertEquals(15, type.maximum);
assertEquals("anyfunc", type.element);
@@ -171,31 +135,31 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
(function TestGlobalType() {
let global = new WebAssembly.Global({value: "i32", mutable: true});
- let type = WebAssembly.Global.type(global);
+ let type = global.type();
assertEquals("i32", type.value);
assertEquals(true, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
global = new WebAssembly.Global({value: "i32"});
- type = WebAssembly.Global.type(global);
+ type = global.type();
assertEquals("i32", type.value);
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
global = new WebAssembly.Global({value: "i64"});
- type = WebAssembly.Global.type(global);
+ type = global.type();
assertEquals("i64", type.value);
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
global = new WebAssembly.Global({value: "f32"});
- type = WebAssembly.Global.type(global);
+ type = global.type();
assertEquals("f32", type.value);
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
global = new WebAssembly.Global({value: "f64"});
- type = WebAssembly.Global.type(global);
+ type = global.type();
assertEquals("f64", type.value);
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
@@ -242,62 +206,63 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
(function TestMemoryConstructorWithMinimum() {
let mem = new WebAssembly.Memory({minimum: 1});
assertTrue(mem instanceof WebAssembly.Memory);
- let type = WebAssembly.Memory.type(mem);
+ let type = mem.type();
assertEquals(1, type.minimum);
- assertEquals(1, Object.getOwnPropertyNames(type).length);
+ assertEquals(false, type.shared);
+ assertEquals(2, Object.getOwnPropertyNames(type).length);
- mem = new WebAssembly.Memory({minimum: 1, maximum: 5});
+ mem = new WebAssembly.Memory({minimum: 1, maximum: 5, shared: false});
assertTrue(mem instanceof WebAssembly.Memory);
- type = WebAssembly.Memory.type(mem);
+ type = mem.type();
assertEquals(1, type.minimum);
assertEquals(5, type.maximum);
- assertEquals(2, Object.getOwnPropertyNames(type).length);
-
- mem = new WebAssembly.Memory({minimum: 1, initial: 2});
- assertTrue(mem instanceof WebAssembly.Memory);
- type = WebAssembly.Memory.type(mem);
- assertEquals(2, type.minimum);
- assertEquals(1, Object.getOwnPropertyNames(type).length);
+ assertEquals(false, type.shared);
+ assertEquals(3, Object.getOwnPropertyNames(type).length);
- mem = new WebAssembly.Memory({minimum: 1, initial: 2, maximum: 5});
+ mem = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
assertTrue(mem instanceof WebAssembly.Memory);
- type = WebAssembly.Memory.type(mem);
- assertEquals(2, type.minimum);
+ type = mem.type();
+ assertEquals(1, type.minimum);
assertEquals(5, type.maximum);
- assertEquals(2, Object.getOwnPropertyNames(type).length);
+ assertEquals(true, type.shared);
+ assertEquals(3, Object.getOwnPropertyNames(type).length);
+
+ assertThrows(
+ () => new WebAssembly.Memory({minimum: 1, initial: 2}), TypeError,
+ /The properties 'initial' and 'minimum' are not allowed at the same time/);
+
+ assertThrows(
+ () => new WebAssembly.Memory({minimum: 1, initial: 2, maximum: 5}),
+ TypeError,
+ /The properties 'initial' and 'minimum' are not allowed at the same time/);
})();
(function TestTableConstructorWithMinimum() {
let table = new WebAssembly.Table({minimum: 1, element: 'anyfunc'});
assertTrue(table instanceof WebAssembly.Table);
- let type = WebAssembly.Table.type(table);
+ let type = table.type();
assertEquals(1, type.minimum);
assertEquals('anyfunc', type.element);
assertEquals(2, Object.getOwnPropertyNames(type).length);
table = new WebAssembly.Table({minimum: 1, element: 'anyfunc', maximum: 5});
assertTrue(table instanceof WebAssembly.Table);
- type = WebAssembly.Table.type(table);
+ type = table.type();
assertEquals(1, type.minimum);
assertEquals(5, type.maximum);
assertEquals('anyfunc', type.element);
assertEquals(3, Object.getOwnPropertyNames(type).length);
- table = new WebAssembly.Table({minimum: 1, initial: 2, element: 'anyfunc'});
- assertTrue(table instanceof WebAssembly.Table);
- type = WebAssembly.Table.type(table);
- assertEquals(2, type.minimum);
- assertEquals('anyfunc', type.element);
- assertEquals(2, Object.getOwnPropertyNames(type).length);
+ assertThrows(
+ () => new WebAssembly.Table({minimum: 1, initial: 2, element: 'anyfunc'}),
+ TypeError,
+ /The properties 'initial' and 'minimum' are not allowed at the same time/);
- table = new WebAssembly.Table({minimum: 1, initial: 2, element: 'anyfunc',
- maximum: 5});
- assertTrue(table instanceof WebAssembly.Table);
- type = WebAssembly.Table.type(table);
- assertEquals(2, type.minimum);
- assertEquals(5, type.maximum);
- assertEquals('anyfunc', type.element);
- assertEquals(3, Object.getOwnPropertyNames(type).length);
+ assertThrows(
+ () => new WebAssembly.Table({minimum: 1, initial: 2, element: 'anyfunc',
+ maximum: 5}),
+ TypeError,
+ /The properties 'initial' and 'minimum' are not allowed at the same time/);
})();
(function TestFunctionConstructor() {
diff --git a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
index 7ed8769d50..4c36fb1ea9 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-gc-js-roundtrip.js
@@ -18,15 +18,15 @@ let instance = (() => {
builder.addFunction('struct_producer', makeSig([], [kWasmDataRef]))
.addBody([
- kGCPrefix, kExprRttCanon, struct, kGCPrefix, kExprStructNewDefault,
- struct
+ kGCPrefix, kExprRttCanon, struct, kGCPrefix,
+ kExprStructNewDefaultWithRtt, struct
])
.exportFunc();
builder.addFunction('array_producer', makeSig([], [kWasmDataRef]))
.addBody([
kExprI32Const, 10, kGCPrefix, kExprRttCanon, array, kGCPrefix,
- kExprArrayNewDefault, array
+ kExprArrayNewDefaultWithRtt, array
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 4f0c32fbab..61dbb47e69 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -77,9 +77,9 @@ let kLocalNamesCode = 2;
let kWasmFunctionTypeForm = 0x60;
let kWasmStructTypeForm = 0x5f;
let kWasmArrayTypeForm = 0x5e;
-let kWasmFunctionExtendingTypeForm = 0x5d;
-let kWasmStructExtendingTypeForm = 0x5c;
-let kWasmArrayExtendingTypeForm = 0x5b;
+let kWasmFunctionSubtypeForm = 0x5d;
+let kWasmStructSubtypeForm = 0x5c;
+let kWasmArraySubtypeForm = 0x5b;
let kLimitsNoMaximum = 0x00;
let kLimitsWithMaximum = 0x01;
@@ -469,13 +469,15 @@ for (let prefix in kPrefixOpcodes) {
// GC opcodes
let kExprStructNewWithRtt = 0x01;
-let kExprStructNewDefault = 0x02;
+let kExprStructNewDefaultWithRtt = 0x02;
let kExprStructGet = 0x03;
let kExprStructGetS = 0x04;
let kExprStructGetU = 0x05;
let kExprStructSet = 0x06;
+let kExprStructNew = 0x07;
+let kExprStructNewDefault = 0x08;
let kExprArrayNewWithRtt = 0x11;
-let kExprArrayNewDefault = 0x12;
+let kExprArrayNewDefaultWithRtt = 0x12;
let kExprArrayGet = 0x13;
let kExprArrayGetS = 0x14;
let kExprArrayGetU = 0x15;
@@ -483,6 +485,9 @@ let kExprArraySet = 0x16;
let kExprArrayLen = 0x17;
let kExprArrayCopy = 0x18;
let kExprArrayInit = 0x19;
+let kExprArrayInitStatic = 0x1a;
+let kExprArrayNew = 0x1b;
+let kExprArrayNewDefault = 0x1c;
let kExprI31New = 0x20;
let kExprI31GetS = 0x21;
let kExprI31GetU = 0x22;
@@ -493,6 +498,10 @@ let kExprRefTest = 0x40;
let kExprRefCast = 0x41;
let kExprBrOnCast = 0x42;
let kExprBrOnCastFail = 0x43;
+let kExprRefTestStatic = 0x44;
+let kExprRefCastStatic = 0x45;
+let kExprBrOnCastStatic = 0x46;
+let kExprBrOnCastStaticFail = 0x47;
let kExprRefIsFunc = 0x50;
let kExprRefIsData = 0x51;
let kExprRefIsI31 = 0x52;
@@ -974,7 +983,6 @@ class Binary {
}
}
-
emit_init_expr_recursive(expr) {
switch (expr.kind) {
case kExprGlobalGet:
@@ -1004,20 +1012,24 @@ class Binary {
this.emit_u8(kExprRefNull);
this.emit_heap_type(expr.value);
break;
+ case kExprStructNew:
case kExprStructNewWithRtt:
+ case kExprStructNewDefault:
+ case kExprStructNewDefaultWithRtt:
for (let operand of expr.operands) {
this.emit_init_expr_recursive(operand);
}
this.emit_u8(kGCPrefix);
- this.emit_u8(kExprStructNewWithRtt);
+ this.emit_u8(expr.kind);
this.emit_u32v(expr.value);
break;
case kExprArrayInit:
+ case kExprArrayInitStatic:
for (let operand of expr.operands) {
this.emit_init_expr_recursive(operand);
}
this.emit_u8(kGCPrefix);
- this.emit_u8(kExprArrayInit);
+ this.emit_u8(expr.kind);
this.emit_u32v(expr.value);
this.emit_u32v(expr.operands.length - 1);
break;
@@ -1170,9 +1182,21 @@ class WasmInitExpr {
static StructNewWithRtt(type, args) {
return {kind: kExprStructNewWithRtt, value: type, operands: args};
}
+ static StructNew(type, args) {
+ return {kind: kExprStructNew, value: type, operands: args};
+ }
+ static StructNewDefaultWithRtt(type, rtt) {
+ return {kind: kExprStructNewDefaultWithRtt, value: type, operands: [rtt]};
+ }
+ static StructNewDefault(type) {
+ return {kind: kExprStructNewDefault, value: type, operands: []};
+ }
static ArrayInit(type, args) {
return {kind: kExprArrayInit, value: type, operands: args};
}
+ static ArrayInitStatic(type, args) {
+ return {kind: kExprArrayInitStatic, value: type, operands: args};
+ }
static RttCanon(type) {
return {kind: kExprRttCanon, value: type};
}
@@ -1256,11 +1280,11 @@ class WasmStruct {
}
}
-class WasmStructExtending extends WasmStruct {
+class WasmStructSubtype extends WasmStruct {
constructor(fields, supertype_idx) {
super(fields);
this.supertype = supertype_idx;
- this.type_form = kWasmStructExtendingTypeForm;
+ this.type_form = kWasmStructSubtypeForm;
}
}
@@ -1273,11 +1297,11 @@ class WasmArray {
}
}
-class WasmArrayExtending extends WasmArray {
+class WasmArraySubtype extends WasmArray {
constructor(type, mutability, supertype_idx) {
super(type, mutability);
this.supertype = supertype_idx;
- this.type_form = kWasmArrayExtendingTypeForm;
+ this.type_form = kWasmArraySubtypeForm;
}
}
class WasmElemSegment {
@@ -1402,8 +1426,9 @@ class WasmModuleBuilder {
return this.types.length - 1;
}
- addStructExtending(fields, supertype_idx) {
- this.types.push(new WasmStructExtending(fields, supertype_idx));
+ kGenericSuperType = 0xFFFFFFFE;
+ addStructSubtype(fields, supertype_idx = this.kGenericSuperType) {
+ this.types.push(new WasmStructSubtype(fields, supertype_idx));
return this.types.length - 1;
}
@@ -1412,8 +1437,8 @@ class WasmModuleBuilder {
return this.types.length - 1;
}
- addArrayExtending(type, mutability, supertype_idx) {
- this.types.push(new WasmArrayExtending(type, mutability, supertype_idx));
+ addArraySubtype(type, mutability, supertype_idx = this.kGenericSuperType) {
+ this.types.push(new WasmArraySubtype(type, mutability, supertype_idx));
return this.types.length - 1;
}
@@ -1651,15 +1676,23 @@ class WasmModuleBuilder {
section.emit_type(field.type);
section.emit_u8(field.mutability ? 1 : 0);
}
- if (type instanceof WasmStructExtending) {
- section.emit_u32v(type.supertype);
+ if (type instanceof WasmStructSubtype) {
+ if (type.supertype === this.kGenericSuperType) {
+ section.emit_u8(kDataRefCode);
+ } else {
+ section.emit_heap_type(type.supertype);
+ }
}
} else if (type instanceof WasmArray) {
section.emit_u8(type.type_form);
section.emit_type(type.type);
section.emit_u8(type.mutability ? 1 : 0);
- if (type instanceof WasmArrayExtending) {
- section.emit_u32v(type.supertype);
+ if (type instanceof WasmArraySubtype) {
+ if (type.supertype === this.kGenericSuperType) {
+ section.emit_u8(kDataRefCode);
+ } else {
+ section.emit_heap_type(type.supertype);
+ }
}
} else {
section.emit_u8(kWasmFunctionTypeForm);
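
A minimal usage sketch of the renamed subtype helpers (assuming d8 with
wasm-module-builder.js loaded; field literals as in reference-globals.js above,
and the generic-supertype default is the path that emits kDataRefCode):

  let builder = new WasmModuleBuilder();
  // No supertype argument: the struct gets the generic "data" supertype.
  let base = builder.addStructSubtype([{type: kWasmI32, mutability: true}]);
  // Explicit supertype index: "sub" is declared as a subtype of "base".
  let sub = builder.addStructSubtype(
      [{type: kWasmI32, mutability: true},
       {type: kWasmI32, mutability: true}], base);
  // Arrays take the same optional supertype index via addArraySubtype.
  let arr = builder.addArraySubtype(kWasmI32, true);
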
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 66c2b7539d..f4ea91ab05 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -69,23 +69,6 @@
# https://code.google.com/p/v8/issues/detail?id=10958
'language/module-code/eval-gtbndng-indirect-faux-assertion': [FAIL],
- # DataView functions should also throw on detached buffers
- 'built-ins/DataView/detached-buffer': [FAIL],
- 'built-ins/DataView/prototype/byteLength/detached-buffer': [FAIL],
- 'built-ins/DataView/prototype/byteOffset/detached-buffer': [FAIL],
- 'built-ins/DataView/prototype/byteLength/instance-has-detached-buffer': [FAIL],
- 'built-ins/DataView/custom-proto-access-detaches-buffer': [FAIL],
- # copyWithin should also throw on detached buffers
- 'built-ins/TypedArray/prototype/copyWithin/coerced-values-end-detached-prototype': [FAIL],
- 'built-ins/TypedArray/prototype/copyWithin/coerced-values-start-detached': [FAIL],
- 'built-ins/TypedArray/prototype/copyWithin/coerced-values-end-detached': [FAIL],
- # fill should also throw on detached buffers
- 'built-ins/TypedArray/prototype/fill/coerced-value-detach': [FAIL],
- 'built-ins/TypedArray/prototype/fill/coerced-end-detach': [FAIL],
- 'built-ins/TypedArray/prototype/fill/coerced-start-detach': [FAIL],
- 'built-ins/TypedArray/prototype/includes/detached-buffer-during-fromIndex-returns-true-for-undefined': [FAIL],
- 'built-ins/TypedArray/prototype/includes/BigInt/detached-buffer-during-fromIndex-returns-true-for-undefined': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4951
'language/expressions/assignment/destructuring/iterator-destructuring-property-reference-target-evaluation-order': [FAIL],
'language/expressions/assignment/destructuring/keyed-destructuring-property-reference-target-evaluation-order': [FAIL],
@@ -194,10 +177,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4698
'language/expressions/call/tco-call-args': [SKIP],
- 'language/expressions/call/tco-cross-realm-class-construct': [SKIP],
- 'language/expressions/call/tco-cross-realm-class-derived-construct': [SKIP],
- 'language/expressions/call/tco-cross-realm-fun-call': [SKIP],
- 'language/expressions/call/tco-cross-realm-fun-construct': [SKIP],
'language/expressions/call/tco-member-args': [SKIP],
'language/expressions/call/tco-non-eval-function': [SKIP],
'language/expressions/call/tco-non-eval-function-dynamic': [SKIP],
@@ -240,8 +219,8 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7472
'intl402/NumberFormat/currency-digits': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=12167
- 'intl402/DisplayNames/prototype/of/type-calendar-invalid': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12209
+ 'intl402/Intl/supportedValuesOf/collations-accepted-by-Collator': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=7831
'language/statements/generators/generator-created-after-decl-inst': [FAIL],
@@ -308,13 +287,6 @@
'language/module-code/export-expname-binding-index': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11111
- 'built-ins/ArrayBuffer/prototype/resize/resize-grow': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/resize-same-size': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/resize-same-size-zero-explicit': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/resize-same-size-zero-implicit': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/resize-shrink': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/resize-shrink-zero-explicit': [FAIL],
- 'built-ins/ArrayBuffer/prototype/resize/resize-shrink-zero-implicit': [FAIL],
'built-ins/ArrayBuffer/prototype/transfer/*': [FAIL],
'built-ins/ArrayBuffer/prototype/transfer/this-is-sharedarraybuffer': [PASS],
'built-ins/DataView/prototype/byteLength/resizable-array-buffer-auto': [FAIL],
@@ -341,6 +313,41 @@
'built-ins/DataView/prototype/setUint16/resizable-buffer': [FAIL],
'built-ins/DataView/prototype/setUint32/resizable-buffer': [FAIL],
'built-ins/DataView/prototype/setUint8/resizable-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/filter/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/filter/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/find/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findIndex/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLast/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLastIndex/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLastIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLast/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/find/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/forEach/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/forEach/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/includes/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/includes/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/indexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/indexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/join/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/join/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/lastIndexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/lastIndexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/map/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/map/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reduce/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reduce/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reduceRight/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reduceRight/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reverse/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reverse/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-target-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-set-values-same-buffer-same-type-resized': [SKIP],
+ 'built-ins/TypedArray/prototype/set/typedarray-arg-target-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/sort/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/sort/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/toLocaleString/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/toLocaleString/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArrayConstructors/ctors/typedarray-arg/out-of-bounds-when-species-retrieved-different-type': [FAIL],
'built-ins/TypedArrayConstructors/ctors/typedarray-arg/out-of-bounds-when-species-retrieved-same-type': [FAIL],
@@ -361,10 +368,13 @@
'built-ins/ShadowRealm/prototype/evaluate/errors-from-the-other-realm-is-wrapped-into-a-typeerror': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/length': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/name': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/no-conditional-strict-mode': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/not-constructor': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/proto': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/returns-primitive-values': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/returns-proxy-callable-object': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/returns-symbol-values': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/throws-syntaxerror-on-bad-syntax': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/throws-typeerror-if-evaluation-resolves-to-non-primitive': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/throws-when-argument-is-not-a-string': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/validates-realm-object': [FAIL],
@@ -377,6 +387,9 @@
'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-new-wrapping-on-each-evaluation': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-share-no-properties': [FAIL],
'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-share-no-properties-extended': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-multiple-different-realms-nested': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-multiple-different-realms': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/nested-realms': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/descriptor': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/exportName-tostring': [FAIL],
'built-ins/ShadowRealm/prototype/importValue/import-value': [FAIL],
@@ -390,10 +403,6 @@
'built-ins/ShadowRealm/prototype/proto': [FAIL],
'built-ins/ShadowRealm/prototype/Symbol.toStringTag': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=12086
- 'language/expressions/in/private-field-invalid-assignment-reference': [FAIL],
- 'language/expressions/in/private-field-in-nested': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=12085
'language/statements/class/subclass/derived-class-return-override-catch-finally': [FAIL],
'language/statements/class/subclass/derived-class-return-override-catch-finally-arrow': [FAIL],
@@ -401,12 +410,6 @@
'language/statements/class/subclass/derived-class-return-override-finally-super-arrow': [FAIL],
'language/statements/class/subclass/derived-class-return-override-for-of-arrow': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=12168
- 'built-ins/Date/prototype/valueOf/S9.4_A3_T1': [FAIL],
- 'built-ins/Date/prototype/valueOf/S9.4_A3_T2': [FAIL],
- 'built-ins/Error/prototype/S15.11.4_A3': [FAIL],
- 'built-ins/Error/prototype/S15.11.4_A4': [FAIL],
-
######################## NEEDS INVESTIGATION ###########################
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
@@ -445,65 +448,6 @@
'harness/detachArrayBuffer': [SKIP],
'harness/detachArrayBuffer-host-detachArrayBuffer': [SKIP],
-
- # https://github.com/tc39/test262/issues/3111
- 'built-ins/TypedArray/prototype/at/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/at/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/byteOffset/BigInt/resizable-array-buffer-auto': [FAIL],
- 'built-ins/TypedArray/prototype/copyWithin/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/copyWithin/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/entries/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/entries/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/every/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/every/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/fill/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/fill/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/filter/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/filter/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/find/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/findIndex/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/findIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/findLast/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/findLastIndex/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/findLastIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/findLast/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/find/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/forEach/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/forEach/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/includes/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/includes/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/indexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/indexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/join/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/join/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/keys/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/keys/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/lastIndexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/lastIndexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/map/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/map/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/reduce/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/reduce/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/reduceRight/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/reduceRight/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/reverse/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/reverse/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-set-values-same-buffer-same-type-resized': [FAIL],
- 'built-ins/TypedArray/prototype/set/typedarray-arg-target-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/slice/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/slice/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/some/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/some/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/sort/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/sort/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/toLocaleString/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/toLocaleString/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/values/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
- 'built-ins/TypedArray/prototype/values/return-abrupt-from-this-out-of-bounds': [FAIL],
-
- # Pending update after https://github.com/tc39/proposal-resizablearraybuffer/issues/68
- 'built-ins/TypedArray/prototype/byteOffset/resizable-array-buffer-auto': [FAIL],
-
############################ SKIPPED TESTS #############################
# These tests take a looong time to run.
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 139af67196..14a219147e 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -48,6 +48,7 @@ FEATURE_FLAGS = {
'Intl.Locale-info': '--harmony_intl_locale_info',
'Intl.DateTimeFormat-extend-timezonename': '--harmony_intl_more_timezone',
'Intl.DisplayNames-v2': '--harmony_intl_displaynames_v2',
+ 'Intl-enumeration': '--harmony_intl_enumeration',
'Symbol.prototype.description': '--harmony-symbol-description',
'FinalizationRegistry': '--harmony-weak-refs-with-cleanup-some',
'WeakRef': '--harmony-weak-refs-with-cleanup-some',
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 1e11465f5a..1fc6f062b4 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -40,20 +40,20 @@ macro LabelTestHelper3(): never
}
@export
-macro TestConstexpr1() {
+macro TestConstexpr1(): void {
check(FromConstexpr<bool>(
IsFastElementsKind(ElementsKind::PACKED_SMI_ELEMENTS)));
}
@export
-macro TestConstexprIf() {
+macro TestConstexprIf(): void {
check(ElementsKindTestHelper1(ElementsKind::UINT8_ELEMENTS));
check(ElementsKindTestHelper1(ElementsKind::UINT16_ELEMENTS));
check(!ElementsKindTestHelper1(ElementsKind::UINT32_ELEMENTS));
}
@export
-macro TestConstexprReturn() {
+macro TestConstexprReturn(): void {
check(FromConstexpr<bool>(
ElementsKindTestHelper2(ElementsKind::UINT8_ELEMENTS)));
check(FromConstexpr<bool>(
@@ -103,7 +103,7 @@ GenericBuiltinTest<JSAny>(param: JSAny): JSAny {
}
@export
-macro TestBuiltinSpecialization() {
+macro TestBuiltinSpecialization(): void {
check(GenericBuiltinTest<Smi>(0) == Null);
check(GenericBuiltinTest<Smi>(1) == Null);
check(GenericBuiltinTest<JSAny>(Undefined) == Undefined);
@@ -160,7 +160,7 @@ GenericMacroTestWithLabels<Object>(param2: Object): Object
}
@export
-macro TestMacroSpecialization() {
+macro TestMacroSpecialization(): void {
try {
const _smi0: Smi = 0;
check(GenericMacroTest<Smi>(0) == Undefined);
@@ -208,7 +208,7 @@ macro TestTernaryOperator(x: Smi): Smi {
}
@export
-macro TestFunctionPointerToGeneric() {
+macro TestFunctionPointerToGeneric(): void {
const fptr1: builtin(Smi) => JSAny = GenericBuiltinTest<Smi>;
const fptr2: builtin(JSAny) => JSAny = GenericBuiltinTest<JSAny>;
@@ -236,19 +236,19 @@ macro TestUnsafeCast(implicit context: Context)(n: Number): Boolean {
}
@export
-macro TestHexLiteral() {
+macro TestHexLiteral(): void {
check(Convert<intptr>(0xffff) + 1 == 0x10000);
check(Convert<intptr>(-0xffff) == -65535);
}
@export
-macro TestLargeIntegerLiterals(implicit c: Context)() {
+macro TestLargeIntegerLiterals(implicit c: Context)(): void {
let _x: int32 = 0x40000000;
let _y: int32 = 0x7fffffff;
}
@export
-macro TestMultilineAssert() {
+macro TestMultilineAssert(): void {
const someVeryLongVariableNameThatWillCauseLineBreaks: Smi = 5;
check(
someVeryLongVariableNameThatWillCauseLineBreaks > 0 &&
@@ -256,7 +256,7 @@ macro TestMultilineAssert() {
}
@export
-macro TestNewlineInString() {
+macro TestNewlineInString(): void {
Print('Hello, World!\n');
}
@@ -265,14 +265,14 @@ const kIntptrConst: intptr = 4;
const kSmiConst: Smi = 3;
@export
-macro TestModuleConstBindings() {
+macro TestModuleConstBindings(): void {
check(kConstexprConst == Int32Constant(5));
check(kIntptrConst == 4);
check(kSmiConst == 3);
}
@export
-macro TestLocalConstBindings() {
+macro TestLocalConstBindings(): void {
const x: constexpr int31 = 3;
const xSmi: Smi = x;
{
@@ -347,7 +347,7 @@ Foo(TestStructA) {
goto Foo(TestStruct2());
}
@export // Silence unused warning.
-macro CallTestStructInLabel(implicit context: Context)() {
+macro CallTestStructInLabel(implicit context: Context)(): void {
try {
TestStructInLabel() otherwise Foo;
} label Foo(_s: TestStructA) {}
@@ -356,7 +356,7 @@ macro CallTestStructInLabel(implicit context: Context)() {
// This macro tests different versions of the for-loop where some parts
// are (not) present.
@export
-macro TestForLoop() {
+macro TestForLoop(): void {
let sum: Smi = 0;
for (let i: Smi = 0; i < 5; ++i) sum += i;
check(sum == 10);
@@ -455,7 +455,7 @@ macro TestForLoop() {
}
@export
-macro TestSubtyping(x: Smi) {
+macro TestSubtyping(x: Smi): void {
const _foo: JSAny = x;
}
@@ -501,7 +501,7 @@ macro TypeswitchExample(implicit context: Context)(x: NumberOrFixedArray):
}
@export
-macro TestTypeswitch(implicit context: Context)() {
+macro TestTypeswitch(implicit context: Context)(): void {
check(TypeswitchExample(FromConstexpr<Smi>(5)) == 26);
const a: FixedArray = AllocateZeroedFixedArray(3);
check(TypeswitchExample(a) == 13);
@@ -509,7 +509,8 @@ macro TestTypeswitch(implicit context: Context)() {
}
@export
-macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object) {
+macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object):
+ void {
typeswitch (obj) {
case (_o: Smi): {
}
@@ -530,7 +531,7 @@ macro ExampleGenericOverload<A: type>(o: Smi): A {
}
@export
-macro TestGenericOverload(implicit context: Context)() {
+macro TestGenericOverload(implicit context: Context)(): void {
const xSmi: Smi = 5;
const xObject: Object = xSmi;
check(ExampleGenericOverload<Smi>(xSmi) == 6);
@@ -538,7 +539,7 @@ macro TestGenericOverload(implicit context: Context)() {
}
@export
-macro TestEquality(implicit context: Context)() {
+macro TestEquality(implicit context: Context)(): void {
const notEqual: bool =
AllocateHeapNumberWithValue(0.5) != AllocateHeapNumberWithValue(0.5);
check(!notEqual);
@@ -558,7 +559,7 @@ macro TestAndOr(x: bool, y: bool, z: bool): bool {
}
@export
-macro TestLogicalOperators() {
+macro TestLogicalOperators(): void {
check(TestAndOr(true, true, true));
check(TestAndOr(true, true, false));
check(TestAndOr(true, false, true));
@@ -584,7 +585,7 @@ macro TestCall(i: Smi): Smi labels A {
}
@export
-macro TestOtherwiseWithCode1() {
+macro TestOtherwiseWithCode1(): void {
let v: Smi = 0;
let s: Smi = 1;
try {
@@ -592,41 +593,41 @@ macro TestOtherwiseWithCode1() {
} label B(v1: Smi) {
v = v1;
}
- assert(v == 2);
+ dcheck(v == 2);
}
@export
-macro TestOtherwiseWithCode2() {
+macro TestOtherwiseWithCode2(): void {
let s: Smi = 0;
for (let i: Smi = 0; i < 10; ++i) {
TestCall(i) otherwise break;
++s;
}
- assert(s == 5);
+ dcheck(s == 5);
}
@export
-macro TestOtherwiseWithCode3() {
+macro TestOtherwiseWithCode3(): void {
let s: Smi = 0;
for (let i: Smi = 0; i < 10; ++i) {
s += TestCall(i) otherwise break;
}
- assert(s == 10);
+ dcheck(s == 10);
}
@export
-macro TestForwardLabel() {
+macro TestForwardLabel(): void {
try {
goto A;
} label A {
goto B(5);
} label B(b: Smi) {
- assert(b == 5);
+ dcheck(b == 5);
}
}
@export
-macro TestQualifiedAccess(implicit context: Context)() {
+macro TestQualifiedAccess(implicit context: Context)(): void {
const s: Smi = 0;
check(!Is<JSArray>(s));
}
@@ -683,7 +684,7 @@ macro TestCatch3(implicit context: Context)(): Smi {
// iterator.tq.
@export
transitioning macro TestIterator(implicit context: Context)(
- o: JSReceiver, map: Map) {
+ o: JSReceiver, map: Map): void {
try {
const t1: JSAny = iterator::GetIteratorMethod(o);
const t2: iterator::IteratorRecord = iterator::GetIterator(o);
@@ -701,12 +702,12 @@ transitioning macro TestIterator(implicit context: Context)(
}
@export
-macro TestFrame1(implicit context: Context)() {
+macro TestFrame1(implicit context: Context)(): void {
const f: Frame = LoadFramePointer();
const frameType: FrameType =
Cast<FrameType>(f.context_or_frame_type) otherwise unreachable;
- assert(frameType == STUB_FRAME);
- assert(f.caller == LoadParentFramePointer());
+ dcheck(frameType == STUB_FRAME);
+ dcheck(f.caller == LoadParentFramePointer());
typeswitch (f) {
case (_f: StandardFrame): {
unreachable;
@@ -717,14 +718,14 @@ macro TestFrame1(implicit context: Context)() {
}
@export
-macro TestNew(implicit context: Context)() {
+macro TestNew(implicit context: Context)(): void {
const f: JSArray = NewJSArray();
check(f.IsEmpty());
f.length = 0;
}
struct TestInner {
- macro SetX(newValue: int32) {
+ macro SetX(newValue: int32): void {
this.x = newValue;
}
macro GetX(): int32 {
@@ -741,7 +742,7 @@ struct TestOuter {
}
@export
-macro TestStructConstructor(implicit context: Context)() {
+macro TestStructConstructor(implicit context: Context)(): void {
// Test default constructor
let a: TestOuter = TestOuter{a: 5, b: TestInner{x: 6, y: 7}, c: 8};
check(a.a == 5);
@@ -756,7 +757,7 @@ macro TestStructConstructor(implicit context: Context)() {
}
class InternalClass extends HeapObject {
- macro Flip() labels NotASmi {
+ macro Flip(): void labels NotASmi {
const tmp = Cast<Smi>(this.b) otherwise NotASmi;
this.b = this.a;
this.a = tmp;
@@ -770,7 +771,7 @@ macro NewInternalClass(x: Smi): InternalClass {
}
@export
-macro TestInternalClass(implicit context: Context)() {
+macro TestInternalClass(implicit context: Context)(): void {
const o = NewInternalClass(5);
o.Flip() otherwise unreachable;
check(o.a == 6);
@@ -789,7 +790,7 @@ struct StructWithConst {
}
@export
-macro TestConstInStructs() {
+macro TestConstInStructs(): void {
const x = StructWithConst{a: Null, b: 1};
let y = StructWithConst{a: Null, b: 1};
y.a = Undefined;
@@ -800,7 +801,7 @@ macro TestConstInStructs() {
}
@export
-macro TestParentFrameArguments(implicit context: Context)() {
+macro TestParentFrameArguments(implicit context: Context)(): void {
const parentFrame = LoadParentFramePointer();
const castFrame = Cast<StandardFrame>(parentFrame) otherwise unreachable;
const arguments = GetFrameArguments(castFrame, 1);
@@ -829,14 +830,14 @@ class SmiPair extends HeapObject {
b: Smi;
}
-macro Swap<T: type>(a:&T, b:&T) {
+macro Swap<T: type>(a:&T, b:&T): void {
const tmp = *a;
*a = *b;
*b = tmp;
}
@export
-macro TestReferences() {
+macro TestReferences(): void {
const array = new SmiPair{a: 7, b: 2};
const ref:&Smi = &array.a;
*ref = 3 + *ref;
@@ -847,7 +848,7 @@ macro TestReferences() {
}
@export
-macro TestSlices() {
+macro TestSlices(): void {
const it = TestIterator{count: 3};
const a = new FixedArray{map: kFixedArrayMap, length: 3, objects: ...it};
check(a.length == 3);
@@ -904,7 +905,7 @@ macro TestSliceEnumeration(implicit context: Context)(): Undefined {
}
@export
-macro TestStaticAssert() {
+macro TestStaticAssert(): void {
static_assert(1 + 2 == 3);
static_assert(Convert<uintptr>(5) < Convert<uintptr>(6));
@@ -932,7 +933,7 @@ builtin NewSmiBox(implicit context: Context)(value: Smi): SmiBox {
}
@export
-macro TestLoadEliminationFixed(implicit context: Context)() {
+macro TestLoadEliminationFixed(implicit context: Context)(): void {
const box = NewSmiBox(123);
const v1 = box.value;
box.unrelated = 999;
@@ -946,7 +947,7 @@ macro TestLoadEliminationFixed(implicit context: Context)() {
}
@export
-macro TestLoadEliminationVariable(implicit context: Context)() {
+macro TestLoadEliminationVariable(implicit context: Context)(): void {
const a = UnsafeCast<FixedArray>(kEmptyFixedArray);
const box = NewSmiBox(1);
const v1 = a.objects[box.value];
@@ -1035,7 +1036,8 @@ macro BranchAndWriteResult(x: Smi, box: SmiBox): bool {
}
@export
-macro TestBranchOnBoolOptimization(implicit context: Context)(input: Smi) {
+macro TestBranchOnBoolOptimization(implicit context: Context)(input: Smi):
+ void {
const box = NewSmiBox(1);
// If the two branches get combined into one, we should be able to determine
// the value of {box} statically.
@@ -1056,7 +1058,7 @@ bitfield struct TestBitFieldStruct extends uint8 {
@export
macro TestBitFieldLoad(
val: TestBitFieldStruct, expectedA: bool, expectedB: uint16,
- expectedC: uint32, expectedD: bool) {
+ expectedC: uint32, expectedD: bool): void {
check(val.a == expectedA);
check(val.b == expectedB);
check(val.c == expectedC);
@@ -1064,7 +1066,7 @@ macro TestBitFieldLoad(
}
@export
-macro TestBitFieldStore(val: TestBitFieldStruct) {
+macro TestBitFieldStore(val: TestBitFieldStruct): void {
let val: TestBitFieldStruct = val; // Get a mutable local copy.
const a: bool = val.a;
const b: uint16 = val.b;
@@ -1083,7 +1085,7 @@ macro TestBitFieldStore(val: TestBitFieldStruct) {
}
@export
-macro TestBitFieldInit(a: bool, b: uint16, c: uint32, d: bool) {
+macro TestBitFieldInit(a: bool, b: uint16, c: uint32, d: bool): void {
const val: TestBitFieldStruct = TestBitFieldStruct{a: a, b: b, c: c, d: d};
TestBitFieldLoad(val, a, b, c, d);
}
@@ -1102,7 +1104,7 @@ bitfield struct TestBitFieldStruct3 extends uintptr {
@export
macro TestBitFieldUintptrOps(
- val2: TestBitFieldStruct2, val3: TestBitFieldStruct3) {
+ val2: TestBitFieldStruct2, val3: TestBitFieldStruct3): void {
let val2: TestBitFieldStruct2 = val2; // Get a mutable local copy.
let val3: TestBitFieldStruct3 = val3; // Get a mutable local copy.
@@ -1142,7 +1144,7 @@ bitfield struct TestBitFieldStruct5 extends uint31 {
}
@export
-macro TestBitFieldMultipleFlags(a: bool, b: int32, c: bool) {
+macro TestBitFieldMultipleFlags(a: bool, b: int32, c: bool): void {
const f = TestBitFieldStruct4{a: a, b: b, c: c};
let simpleExpression = f.a & f.b == 3 & !f.c;
let expectedReduction = (Signed(f) & 0x1f) == Convert<int32>(1 | 3 << 1);
@@ -1222,7 +1224,7 @@ struct InternalClassStructElementGeneratorIterator {
}
@export
-macro TestFullyGeneratedClassWithElements() {
+macro TestFullyGeneratedClassWithElements(): void {
// Test creation, initialization and access of a fully generated class with
// simple (Smi) elements
const length: Smi = Convert<Smi>(3);
@@ -1234,12 +1236,12 @@ macro TestFullyGeneratedClassWithElements() {
value: 11
}
};
- assert(object1.length == 3);
- assert(object1.data == 0);
- assert(object1.object == Undefined);
- assert(object1.entries[0] == 11);
- assert(object1.entries[1] == 12);
- assert(object1.entries[2] == 13);
+ dcheck(object1.length == 3);
+ dcheck(object1.data == 0);
+ dcheck(object1.object == Undefined);
+ dcheck(object1.entries[0] == 11);
+ dcheck(object1.entries[1] == 12);
+ dcheck(object1.entries[2] == 13);
// Test creation, initialization and access of a fully generated class
// with elements that are a struct.
@@ -1255,20 +1257,20 @@ macro TestFullyGeneratedClassWithElements() {
}
};
- assert(object2.dummy1 == 44);
- assert(object2.dummy2 == 45);
- assert(object2.count == 3);
- assert(object2.data == 55);
- assert(object2.object == Undefined);
- assert(object2.entries[0] == 3);
- assert(object2.entries[1] == 4);
- assert(object2.entries[2] == 5);
- assert(object2.more_entries[0].a == 1);
- assert(object2.more_entries[0].b == 2);
- assert(object2.more_entries[1].a == 3);
- assert(object2.more_entries[1].b == 4);
- assert(object2.more_entries[2].a == 5);
- assert(object2.more_entries[2].b == 6);
+ dcheck(object2.dummy1 == 44);
+ dcheck(object2.dummy2 == 45);
+ dcheck(object2.count == 3);
+ dcheck(object2.data == 55);
+ dcheck(object2.object == Undefined);
+ dcheck(object2.entries[0] == 3);
+ dcheck(object2.entries[1] == 4);
+ dcheck(object2.entries[2] == 5);
+ dcheck(object2.more_entries[0].a == 1);
+ dcheck(object2.more_entries[0].b == 2);
+ dcheck(object2.more_entries[1].a == 3);
+ dcheck(object2.more_entries[1].b == 4);
+ dcheck(object2.more_entries[2].a == 5);
+ dcheck(object2.more_entries[2].b == 6);
}
@export
@@ -1285,7 +1287,7 @@ class ExportedSubClass2 extends ExportedSubClassBase {
}
@export
-macro TestGeneratedCastOperators(implicit context: Context)() {
+macro TestGeneratedCastOperators(implicit context: Context)(): void {
const a = new
ExportedSubClass{a: Null, b: Null, c_field: 3, d_field: 4, e_field: 5};
const b = new ExportedSubClassBase{a: Undefined, b: Null};
@@ -1294,39 +1296,39 @@ macro TestGeneratedCastOperators(implicit context: Context)() {
const aO: Object = a;
const bO: Object = b;
const cO: Object = c;
- assert(Is<ExportedSubClassBase>(aO));
- assert(Is<ExportedSubClass>(aO));
- assert(!Is<ExportedSubClass2>(aO));
- assert(Is<ExportedSubClassBase>(bO));
- assert(!Is<ExportedSubClass>(bO));
- assert(Is<ExportedSubClassBase>(cO));
- assert(!Is<ExportedSubClass>(cO));
- assert(Is<ExportedSubClass2>(cO));
+ dcheck(Is<ExportedSubClassBase>(aO));
+ dcheck(Is<ExportedSubClass>(aO));
+ dcheck(!Is<ExportedSubClass2>(aO));
+ dcheck(Is<ExportedSubClassBase>(bO));
+ dcheck(!Is<ExportedSubClass>(bO));
+ dcheck(Is<ExportedSubClassBase>(cO));
+ dcheck(!Is<ExportedSubClass>(cO));
+ dcheck(Is<ExportedSubClass2>(cO));
const jsf: JSFunction =
*NativeContextSlot(ContextSlot::REGEXP_FUNCTION_INDEX);
- assert(!Is<JSSloppyArgumentsObject>(jsf));
+ dcheck(!Is<JSSloppyArgumentsObject>(jsf));
const parameterValues = NewFixedArray(0, ConstantIterator(TheHole));
const elements = NewSloppyArgumentsElements(
0, context, parameterValues, ConstantIterator(TheHole));
const fastArgs = arguments::NewJSFastAliasedArgumentsObject(
elements, Convert<Smi>(0), jsf);
- assert(Is<JSArgumentsObject>(fastArgs));
+ dcheck(Is<JSArgumentsObject>(fastArgs));
}
extern runtime InYoungGeneration(implicit context: Context)(HeapObject):
Boolean;
@export
-macro TestNewPretenured(implicit context: Context)() {
+macro TestNewPretenured(implicit context: Context)(): void {
const obj = new (Pretenured) ExportedSubClassBase{a: Undefined, b: Null};
- assert(Is<ExportedSubClassBase>(obj));
- assert(InYoungGeneration(obj) == False);
+ dcheck(Is<ExportedSubClassBase>(obj));
+ dcheck(InYoungGeneration(obj) == False);
}
@export
-macro TestWord8Phi() {
+macro TestWord8Phi(): void {
for (let i: intptr = -5; i < 5; ++i) {
let x: int8;
if (i == -1) {
@@ -1339,7 +1341,7 @@ macro TestWord8Phi() {
}
@export
-macro TestOffHeapSlice(ptr: RawPtr<char8>, length: intptr) {
+macro TestOffHeapSlice(ptr: RawPtr<char8>, length: intptr): void {
const string = UnsafeCast<SeqOneByteString>(Convert<String>('Hello World!'));
check(*torque_internal::unsafe::NewOffHeapReference(ptr) == string.chars[0]);
@@ -1362,7 +1364,7 @@ builtin ReturnTwoValues(implicit context: Context)(
}
@export
-macro TestCallMultiReturnBuiltin(implicit context: Context)() {
+macro TestCallMultiReturnBuiltin(implicit context: Context)(): void {
const result = ReturnTwoValues(444, FromConstexpr<String>('hi'));
check(result.a == 445);
check(result.b == FromConstexpr<String>('hi').map);
@@ -1388,7 +1390,7 @@ macro AddSmiAndConstexprValues(a: Smi, b: constexpr int31): Smi {
}
@export
-macro TestCreateLazyNodeFromTorque() {
+macro TestCreateLazyNodeFromTorque(): void {
const lazy = %MakeLazy<Smi>('GetLazySmi');
const result = TestRunLazyTwice(lazy);
check(result == 6);
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index f854cee8c1..e2ea833cf9 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -493,18 +493,21 @@ v8_source_set("unittests_sources") {
]
}
- if (is_posix && v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-posix-unittest.cc" ]
- }
+ if (v8_enable_webassembly) {
+ if (is_posix) {
+ sources += [ "wasm/trap-handler-posix-unittest.cc" ]
+ }
- if (is_win && v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-win-unittest.cc" ]
- }
+ if (is_win) {
+ sources += [ "wasm/trap-handler-win-unittest.cc" ]
+ }
- # Include this test only on arm64 simulator builds on x64 on Linux.
- if (current_cpu == "x64" && v8_current_cpu == "arm64" && is_linux &&
- v8_enable_webassembly) {
- sources += [ "wasm/trap-handler-simulator-unittest.cc" ]
+ # Include this test only on arm64 simulator builds on x64 on Linux, Mac and
+ # Windows.
+ if (current_cpu == "x64" && v8_current_cpu == "arm64" &&
+ (is_linux || is_mac || is_win)) {
+ sources += [ "wasm/trap-handler-simulator-unittest.cc" ]
+ }
}
configs = [
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index bff5697b3f..6d59eadba8 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -5454,6 +5454,91 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsSimd128) {
expected_poke_pair, expected_poke);
}
+struct SIMDConstZeroFcmTest {
+ const bool is_zero;
+ const uint8_t lane_size;
+ const Operator* (MachineOperatorBuilder::*fcm_operator)();
+ const ArchOpcode expected_op_left;
+ const ArchOpcode expected_op_right;
+ const size_t size;
+};
+
+static const SIMDConstZeroFcmTest SIMDConstZeroFcmTests[] = {
+ {true, 64, &MachineOperatorBuilder::F64x2Eq, kArm64FEq, kArm64FEq, 1},
+ {true, 64, &MachineOperatorBuilder::F64x2Ne, kArm64FNe, kArm64FNe, 1},
+ {true, 64, &MachineOperatorBuilder::F64x2Lt, kArm64FGt, kArm64FLt, 1},
+ {true, 64, &MachineOperatorBuilder::F64x2Le, kArm64FGe, kArm64FLe, 1},
+ {false, 64, &MachineOperatorBuilder::F64x2Eq, kArm64FEq, kArm64FEq, 2},
+ {false, 64, &MachineOperatorBuilder::F64x2Ne, kArm64FNe, kArm64FNe, 2},
+ {false, 64, &MachineOperatorBuilder::F64x2Lt, kArm64FLt, kArm64FLt, 2},
+ {false, 64, &MachineOperatorBuilder::F64x2Le, kArm64FLe, kArm64FLe, 2},
+ {true, 32, &MachineOperatorBuilder::F32x4Eq, kArm64FEq, kArm64FEq, 1},
+ {true, 32, &MachineOperatorBuilder::F32x4Ne, kArm64FNe, kArm64FNe, 1},
+ {true, 32, &MachineOperatorBuilder::F32x4Lt, kArm64FGt, kArm64FLt, 1},
+ {true, 32, &MachineOperatorBuilder::F32x4Le, kArm64FGe, kArm64FLe, 1},
+ {false, 32, &MachineOperatorBuilder::F32x4Eq, kArm64FEq, kArm64FEq, 2},
+ {false, 32, &MachineOperatorBuilder::F32x4Ne, kArm64FNe, kArm64FNe, 2},
+ {false, 32, &MachineOperatorBuilder::F32x4Lt, kArm64FLt, kArm64FLt, 2},
+ {false, 32, &MachineOperatorBuilder::F32x4Le, kArm64FLe, kArm64FLe, 2},
+};
+
+using InstructionSelectorSIMDConstZeroFcmTest =
+ InstructionSelectorTestWithParam<SIMDConstZeroFcmTest>;
+
+TEST_P(InstructionSelectorSIMDConstZeroFcmTest, ConstZero) {
+ const SIMDConstZeroFcmTest param = GetParam();
+ byte data[16] = {};
+ if (!param.is_zero) data[0] = 0xff;
+ // Const node on the left
+ {
+ StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
+ Node* cnst = m.S128Const(data);
+ Node* fcm =
+ m.AddNode((m.machine()->*param.fcm_operator)(), cnst, m.Parameter(0));
+ m.Return(fcm);
+ Stream s = m.Build();
+ ASSERT_EQ(param.size, s.size());
+ if (param.size == 1) {
+ EXPECT_EQ(param.expected_op_left, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(param.lane_size, LaneSizeField::decode(s[0]->opcode()));
+ } else {
+ EXPECT_EQ(kArm64S128Const, s[0]->arch_opcode());
+ EXPECT_EQ(param.expected_op_left, s[1]->arch_opcode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(param.lane_size, LaneSizeField::decode(s[1]->opcode()));
+ }
+ }
+ // Const node on the right
+ {
+ StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
+ Node* cnst = m.S128Const(data);
+ Node* fcm =
+ m.AddNode((m.machine()->*param.fcm_operator)(), m.Parameter(0), cnst);
+ m.Return(fcm);
+ Stream s = m.Build();
+ ASSERT_EQ(param.size, s.size());
+ if (param.size == 1) {
+ EXPECT_EQ(param.expected_op_right, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(param.lane_size, LaneSizeField::decode(s[0]->opcode()));
+ } else {
+ EXPECT_EQ(kArm64S128Const, s[0]->arch_opcode());
+ EXPECT_EQ(param.expected_op_right, s[1]->arch_opcode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(param.lane_size, LaneSizeField::decode(s[1]->opcode()));
+ }
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDConstZeroFcmTest,
+ ::testing::ValuesIn(SIMDConstZeroFcmTests));
+
} // namespace
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
index 54652e30ae..0a36179a60 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
@@ -61,6 +61,22 @@ TEST_F(InstructionTest, OperandInterference) {
}
}
+ // 128 bit slots can interfere with other slots at a different index.
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < 128 / kBitsPerByte / kSystemPointerSize; ++j) {
+ EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, i, kWord, i - j));
+ EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, i, kFloat, i - j));
+ EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, i, kDouble,
+ i - j));
+ EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kSimd128, i,
+ MachineRepresentation::kSimd128, i - j));
+ }
+ }
+
// All FP registers interfere with themselves.
for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kFloat, i, kFloat, i));
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index 22481467e9..52ca8c7f6e 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -31,7 +31,7 @@ class BranchEliminationTest : public GraphTest {
GraphReducer graph_reducer(zone(), graph(), tick_counter(), broker(),
jsgraph.Dead());
BranchElimination branch_condition_elimination(&graph_reducer, &jsgraph,
- zone());
+ zone(), nullptr);
graph_reducer.AddReducer(&branch_condition_elimination);
graph_reducer.ReduceGraph();
}
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index c37aeeb839..26cfe3ead0 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -28,15 +28,15 @@ class ControlEquivalenceTest : public GraphTest {
}
protected:
- void ComputeEquivalence(Node* node) {
- graph()->SetEnd(graph()->NewNode(common()->End(1), node));
+ void ComputeEquivalence(Node* end_node) {
+ graph()->SetEnd(graph()->NewNode(common()->End(1), end_node));
if (FLAG_trace_turbo) {
SourcePositionTable table(graph());
NodeOriginTable table2(graph());
StdoutStream{} << AsJSON(*graph(), &table, &table2);
}
ControlEquivalence equivalence(zone(), graph());
- equivalence.Run(node);
+ equivalence.Run(end_node);
classes_.resize(graph()->NodeCount());
for (Node* node : all_nodes_) {
classes_[node->id()] = equivalence.ClassOf(node);
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index bb965cce94..98de4c7f7c 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -712,7 +712,7 @@ TEST_F(GraphReducerTest, Forwarding3) {
A1Forwarder r;
- for (size_t i = 0; i < 3; i++) {
+ for (size_t j = 0; j < 3; j++) {
size_t before = graph()->NodeCount();
ReduceGraph(&r);
EXPECT_EQ(before, graph()->NodeCount());
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 08c16f60c0..b6376ff280 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -95,7 +95,7 @@ TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32ToFloat64, s[0]->arch_opcode());
+ EXPECT_EQ(kIA32Float32ToFloat64, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -106,7 +106,7 @@ TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64ToFloat32, s[0]->arch_opcode());
+ EXPECT_EQ(kIA32Float64ToFloat32, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -161,7 +161,7 @@ TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEUint32ToFloat64, s[0]->arch_opcode());
+ EXPECT_EQ(kIA32Uint32ToFloat64, s[0]->arch_opcode());
}
diff --git a/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
index fa0cd23a86..c69d4324f2 100644
--- a/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
@@ -235,12 +235,17 @@ const Conversion kConversionInstructions[] = {
// LOONG64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kLoong64Div_wu,
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
MachineType::Uint32()},
- {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kLoong64Mod_wu,
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kLoong64Cmp, MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
MachineType::Uint32()},
- {&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kLoong64Mulh_wu,
- MachineType::Uint32()}};
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kLoong64Cmp, MachineType::Uint32()},
+};
} // namespace
@@ -991,13 +996,10 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
(m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
// Make sure the `ChangeUint32ToUint64` node turned into a no-op.
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(1U, s.size());
EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(kLoong64Bstrpick_d, s[1]->arch_opcode());
- EXPECT_EQ(3U, s[1]->InputCount());
- EXPECT_EQ(1U, s[1]->OutputCount());
}
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 27e6b32560..8c8178f48b 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -523,10 +523,10 @@ TEST_F(LoopPeelingTest, SimpleLoopWithUnmarkedExit) {
{
LoopTree* loop_tree = GetLoopTree();
- LoopTree::Loop* loop = loop_tree->outer_loops()[0];
+ LoopTree::Loop* outer_loop = loop_tree->outer_loops()[0];
LoopPeeler peeler(graph(), common(), loop_tree, zone(), source_positions(),
node_origins());
- EXPECT_FALSE(peeler.CanPeel(loop));
+ EXPECT_FALSE(peeler.CanPeel(outer_loop));
}
}
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index 0b8c75cc44..c24ad5b48e 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -289,12 +289,17 @@ const Conversion kFloat32RoundInstructions[] = {
// MIPS64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU,
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp,
MachineType::Uint32()},
- {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU,
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kMips64Cmp, MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp,
MachineType::Uint32()},
- {&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kMips64MulHighU,
- MachineType::Uint32()}};
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kMips64Cmp, MachineType::Uint32()},
+};
} // namespace
@@ -1159,10 +1164,22 @@ TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
(m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
// Make sure the `ChangeUint32ToUint64` node turned into a no-op.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
- EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(1U, s[0]->OutputCount());
+ if (FLAG_debug_code && binop.arch_opcode == kMips64Cmp) {
+ ASSERT_EQ(6U, s.size());
+ EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Dshl, s[1]->arch_opcode());
+ EXPECT_EQ(kMips64Dshl, s[2]->arch_opcode());
+ EXPECT_EQ(kMips64Cmp, s[3]->arch_opcode());
+ EXPECT_EQ(kMips64AssertEqual, s[4]->arch_opcode());
+ EXPECT_EQ(kMips64Cmp, s[5]->arch_opcode());
+ EXPECT_EQ(2U, s[5]->InputCount());
+ EXPECT_EQ(1U, s[5]->OutputCount());
+ } else {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
}
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
diff --git a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
index d81f5adabe..9b7687b6c4 100644
--- a/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/riscv64/instruction-selector-riscv64-unittest.cc
@@ -995,13 +995,14 @@ TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
m.Return(m.ChangeUint32ToUint64(
m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kRiscvAdd64, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(kRiscvLwu, s[1]->arch_opcode());
+ EXPECT_EQ(kRiscvLw, s[1]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(kRiscvZeroExtendWord, s[2]->arch_opcode());
EXPECT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(1U, s[1]->OutputCount());
}
diff --git a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
index 68a7ffea4a..1ba86542b0 100644
--- a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
@@ -106,8 +106,8 @@ TEST_F(SchedulerRPOTest, Line) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 1 + i, false);
- for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
- BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(i));
+ for (size_t j = 0; j < schedule.BasicBlockCount(); j++) {
+ BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(j));
if (block->rpo_number() >= 0 && block->SuccessorCount() == 1) {
EXPECT_EQ(block->rpo_number() + 1, block->SuccessorAt(0)->rpo_number());
}
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 242aa93f8d..bede5d5441 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -200,7 +200,7 @@ class TyperTest : public TypedGraphTest {
Type r1 = RandomRange();
Type r2 = RandomRange();
Type expected_type = TypeBinaryOp(op, r1, r2);
- for (int i = 0; i < 10; i++) {
+ for (int j = 0; j < 10; j++) {
double x1 = RandomInt(r1.AsRange());
double x2 = RandomInt(r2.AsRange());
double result_value = opfun(x1, x2);
@@ -229,7 +229,7 @@ class TyperTest : public TypedGraphTest {
Type r1 = RandomRange();
Type r2 = RandomRange();
Type expected_type = TypeBinaryOp(op, r1, r2);
- for (int i = 0; i < 10; i++) {
+ for (int j = 0; j < 10; j++) {
double x1 = RandomInt(r1.AsRange());
double x2 = RandomInt(r2.AsRange());
bool result_value = opfun(x1, x2);
@@ -249,7 +249,7 @@ class TyperTest : public TypedGraphTest {
Type r1 = RandomRange(true);
Type r2 = RandomRange(true);
Type expected_type = TypeBinaryOp(op, r1, r2);
- for (int i = 0; i < 10; i++) {
+ for (int j = 0; j < 10; j++) {
int32_t x1 = static_cast<int32_t>(RandomInt(r1.AsRange()));
int32_t x2 = static_cast<int32_t>(RandomInt(r2.AsRange()));
double result_value = opfun(x1, x2);
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 2adac76d66..1ba4a29ceb 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -5,6 +5,7 @@
#include <limits>
#include "src/common/globals.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
@@ -2269,6 +2270,22 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorSIMDSwizzleConstantTest,
::testing::ValuesIn(kSwizzleConstants));
+TEST_F(InstructionSelectorTest, F64x2PromoteLowF32x4WithS128Load64Zero) {
+ StreamBuilder m(this, MachineType::Simd128(), MachineType::Int32());
+ Node* const load =
+ m.AddNode(m.machine()->LoadTransform(MemoryAccessKind::kProtected,
+ LoadTransformation::kS128Load64Zero),
+ m.Int32Constant(2), m.Parameter(0));
+ Node* const promote = m.AddNode(m.machine()->F64x2PromoteLowF32x4(), load);
+ m.Return(promote);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(kX64F64x2PromoteLowF32x4, s[0]->arch_opcode());
+ ASSERT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc b/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
index fa14c7dc0a..feb4f4f2ad 100644
--- a/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
+++ b/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
@@ -98,6 +98,56 @@ TEST_F(DebugPropertyIteratorTest, DoestWalksPrototypeChainIfInaccesible) {
ASSERT_TRUE(iterator->Done());
}
+TEST_F(DebugPropertyIteratorTest, SkipsIndicesOnArrays) {
+ TryCatch try_catch(isolate());
+
+ Local<Value> elements[2] = {
+ Number::New(isolate(), 21),
+ Number::New(isolate(), 42),
+ };
+ auto array = Array::New(isolate(), elements, arraysize(elements));
+
+ auto iterator = PropertyIterator::Create(context(), array, true);
+ while (!iterator->Done()) {
+ ASSERT_FALSE(iterator->is_array_index());
+ ASSERT_TRUE(iterator->Advance().FromMaybe(false));
+ }
+}
+
+TEST_F(DebugPropertyIteratorTest, SkipsIndicesOnObjects) {
+ TryCatch try_catch(isolate());
+
+ Local<Name> names[2] = {
+ String::NewFromUtf8Literal(isolate(), "42"),
+ String::NewFromUtf8Literal(isolate(), "x"),
+ };
+ Local<Value> values[arraysize(names)] = {
+ Number::New(isolate(), 42),
+ Number::New(isolate(), 21),
+ };
+ Local<Object> object =
+ Object::New(isolate(), Null(isolate()), names, values, arraysize(names));
+
+ auto iterator = PropertyIterator::Create(context(), object, true);
+ while (!iterator->Done()) {
+ ASSERT_FALSE(iterator->is_array_index());
+ ASSERT_TRUE(iterator->Advance().FromMaybe(false));
+ }
+}
+
+TEST_F(DebugPropertyIteratorTest, SkipsIndicesOnTypedArrays) {
+ TryCatch try_catch(isolate());
+
+ auto buffer = ArrayBuffer::New(isolate(), 1024 * 1024);
+ auto array = Uint8Array::New(buffer, 0, 1024 * 1024);
+
+ auto iterator = PropertyIterator::Create(context(), array, true);
+ while (!iterator->Done()) {
+ ASSERT_FALSE(iterator->is_array_index());
+ ASSERT_TRUE(iterator->Advance().FromMaybe(false));
+ }
+}
+
} // namespace
} // namespace debug
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
index b349b591ca..534f744e7e 100644
--- a/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/ephemeron-pair-unittest.cc
@@ -242,50 +242,5 @@ TEST_F(EphemeronPairTest, EphemeronPairWithEmptyMixinValue) {
FinishMarking();
}
-namespace {
-
-class KeyWithCallback final : public GarbageCollected<KeyWithCallback> {
- public:
- template <typename Callback>
- explicit KeyWithCallback(Callback callback) {
- callback(this);
- }
- void Trace(Visitor*) const {}
-};
-
-class EphemeronHolderForKeyWithCallback final
- : public GarbageCollected<EphemeronHolderForKeyWithCallback> {
- public:
- EphemeronHolderForKeyWithCallback(KeyWithCallback* key, GCed* value)
- : ephemeron_pair_(key, value) {}
- void Trace(cppgc::Visitor* visitor) const { visitor->Trace(ephemeron_pair_); }
-
- private:
- const EphemeronPair<KeyWithCallback, GCed> ephemeron_pair_;
-};
-
-} // namespace
-
-TEST_F(EphemeronPairTest, EphemeronPairWithKeyInConstruction) {
- GCed* value = MakeGarbageCollected<GCed>(GetAllocationHandle());
- Persistent<EphemeronHolderForKeyWithCallback> holder;
- InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get());
- FinishSteps();
- MakeGarbageCollected<KeyWithCallback>(
- GetAllocationHandle(), [this, &holder, value](KeyWithCallback* thiz) {
- // The test doesn't use conservative stack scanning to retain key to
- // avoid retaining value as a side effect.
- EXPECT_TRUE(HeapObjectHeader::FromObject(thiz).TryMarkAtomic());
- holder = MakeGarbageCollected<EphemeronHolderForKeyWithCallback>(
- GetAllocationHandle(), thiz, value);
- // Finishing marking at this point will leave an ephemeron pair
- // reachable where the key is still in construction. The GC needs to
- // mark the value for such pairs as live in the atomic pause as they key
- // is considered live.
- FinishMarking();
- });
- EXPECT_TRUE(HeapObjectHeader::FromObject(value).IsMarked());
-}
-
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
index 5fc412c8c3..985938bede 100644
--- a/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
@@ -292,9 +292,11 @@ class GCedHolder : public GarbageCollected<GCedHolder> {
} // namespace
#if V8_ENABLE_CHECKS
-#ifdef CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
+#ifdef CPPGC_VERIFY_HEAP
-TEST_F(PrefinalizerDeathTest, PrefinalizerCantRewireGraphWithDeadObjects) {
+TEST_F(PrefinalizerDeathTest, PrefinalizerCanRewireGraphWithDeadObjects) {
+  // Prefinalizers are allowed to rewire dead objects to dead objects as that
+ // doesn't affect the live object graph.
Persistent<LinkedNode> root{MakeGarbageCollected<LinkedNode>(
GetAllocationHandle(),
MakeGarbageCollected<LinkedNode>(
@@ -305,7 +307,7 @@ TEST_F(PrefinalizerDeathTest, PrefinalizerCantRewireGraphWithDeadObjects) {
// All LinkedNode objects will die on the following GC. The pre-finalizer may
// still operate with them but not add them to a live object.
root.Clear();
- EXPECT_DEATH_IF_SUPPORTED(PreciseGC(), "");
+ PreciseGC();
}
TEST_F(PrefinalizerDeathTest, PrefinalizerCantRessurectObjectOnStack) {
@@ -325,7 +327,7 @@ TEST_F(PrefinalizerDeathTest, PrefinalizerCantRessurectObjectOnHeap) {
EXPECT_DEATH_IF_SUPPORTED(PreciseGC(), "");
}
-#endif // CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
+#endif // CPPGC_VERIFY_HEAP
#endif // V8_ENABLE_CHECKS
#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index 28c40b2671..3e5813dfe9 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -153,7 +153,7 @@ TEST_F(LocalEmbedderHeapTracerWithIsolate,
EmbedderStackStateScope scope(
&local_tracer, EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);
{
- EmbedderStackStateScope scope(
+ EmbedderStackStateScope nested_scope(
&local_tracer,
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
EXPECT_CALL(
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index eefacb33ed..c8ec9ca563 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -179,10 +179,10 @@ TEST_F(GCTracerTest, RegularScope) {
EXPECT_DOUBLE_EQ(0.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
// Sample not added because it's not within a started tracer.
tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
- tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector unittest");
+ tracer->Start(GarbageCollector::MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
- tracer->Stop(MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(100.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
}
@@ -194,12 +194,12 @@ TEST_F(GCTracerTest, IncrementalScope) {
0.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
// Sample is added because its ScopeId is listed as incremental sample.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
- tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector unittest");
+ tracer->Start(GarbageCollector::MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
// Switch to incremental MC to enable writing back incremental scopes.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
- tracer->Stop(MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
200.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
}
@@ -211,15 +211,15 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
// Round 1.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 50);
// Scavenger has no impact on incremental marking details.
- tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
- "collector unittest");
- tracer->Stop(SCAVENGER);
- tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
+ tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
"collector unittest");
+ tracer->Stop(GarbageCollector::SCAVENGER);
+ tracer->Start(GarbageCollector::MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
// Switch to incremental MC to enable writing back incremental scopes.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
- tracer->Stop(MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
100,
tracer->current_
@@ -239,12 +239,12 @@ TEST_F(GCTracerTest, IncrementalMarkingDetails) {
// Round 2. Numbers should be reset.
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 13);
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 15);
- tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector unittest");
+ tracer->Start(GarbageCollector::MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
// Switch to incremental MC to enable writing back incremental scopes.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 122);
- tracer->Stop(MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
122,
tracer->current_
@@ -276,24 +276,24 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
EXPECT_EQ(1000000 / 100,
tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
// Scavenger has no impact on incremental marking details.
- tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
+ tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
"collector unittest");
- tracer->Stop(SCAVENGER);
+ tracer->Stop(GarbageCollector::SCAVENGER);
// 1000000 bytes in 100ms.
tracer->AddIncrementalMarkingStep(100, 1000000);
EXPECT_EQ(300, tracer->incremental_marking_duration_);
EXPECT_EQ(3000000u, tracer->incremental_marking_bytes_);
EXPECT_EQ(1000000 / 100,
tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
- tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector unittest");
+ tracer->Start(GarbageCollector::MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
// Switch to incremental MC.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
// 1000000 bytes in 100ms.
tracer->AddIncrementalMarkingStep(100, 1000000);
EXPECT_EQ(400, tracer->incremental_marking_duration_);
EXPECT_EQ(4000000u, tracer->incremental_marking_bytes_);
- tracer->Stop(MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MARK_COMPACTOR);
EXPECT_EQ(400, tracer->current_.incremental_marking_duration);
EXPECT_EQ(4000000u, tracer->current_.incremental_marking_bytes);
EXPECT_EQ(0, tracer->incremental_marking_duration_);
@@ -303,11 +303,11 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
// Round 2.
tracer->AddIncrementalMarkingStep(2000, 1000);
- tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector unittest");
+ tracer->Start(GarbageCollector::MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
// Switch to incremental MC.
tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
- tracer->Stop(MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ((4000000.0 / 400 + 1000.0 / 2000) / 2,
static_cast<double>(
tracer->IncrementalMarkingSpeedInBytesPerMillisecond()));
@@ -352,13 +352,13 @@ TEST_F(GCTracerTest, MutatorUtilization) {
TEST_F(GCTracerTest, BackgroundScavengerScope) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
+ tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
"collector unittest");
tracer->AddScopeSampleBackground(
GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 10);
tracer->AddScopeSampleBackground(
GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL, 1);
- tracer->Stop(SCAVENGER);
+ tracer->Stop(GarbageCollector::SCAVENGER);
EXPECT_DOUBLE_EQ(
11, tracer->current_
.scopes[GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]);
@@ -367,8 +367,8 @@ TEST_F(GCTracerTest, BackgroundScavengerScope) {
TEST_F(GCTracerTest, BackgroundMinorMCScope) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- tracer->Start(MINOR_MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector unittest");
+ tracer->Start(GarbageCollector::MINOR_MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
tracer->AddScopeSampleBackground(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
10);
tracer->AddScopeSampleBackground(GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
@@ -381,7 +381,7 @@ TEST_F(GCTracerTest, BackgroundMinorMCScope) {
GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 30);
tracer->AddScopeSampleBackground(
GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 3);
- tracer->Stop(MINOR_MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MINOR_MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
11,
tracer->current_.scopes[GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING]);
@@ -401,14 +401,14 @@ TEST_F(GCTracerTest, BackgroundMajorMCScope) {
200);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_MARKING, 10);
// Scavenger should not affect the major mark-compact scopes.
- tracer->Start(SCAVENGER, GarbageCollectionReason::kTesting,
+ tracer->Start(GarbageCollector::SCAVENGER, GarbageCollectionReason::kTesting,
"collector unittest");
- tracer->Stop(SCAVENGER);
+ tracer->Stop(GarbageCollector::SCAVENGER);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 20);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_MARKING, 1);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_SWEEPING, 2);
- tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
- "collector unittest");
+ tracer->Start(GarbageCollector::MARK_COMPACTOR,
+ GarbageCollectionReason::kTesting, "collector unittest");
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
30);
tracer->AddScopeSampleBackground(GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY,
@@ -417,7 +417,7 @@ TEST_F(GCTracerTest, BackgroundMajorMCScope) {
GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 40);
tracer->AddScopeSampleBackground(
GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS, 4);
- tracer->Stop(MARK_COMPACTOR);
+ tracer->Stop(GarbageCollector::MARK_COMPACTOR);
EXPECT_DOUBLE_EQ(
111, tracer->current_.scopes[GCTracer::Scope::MC_BACKGROUND_MARKING]);
EXPECT_DOUBLE_EQ(
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 9247109930..129b1702f6 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -22,8 +22,6 @@ namespace internal {
using HeapTest = TestWithContext;
TEST(Heap, YoungGenerationSizeFromOldGenerationSize) {
- const size_t MB = static_cast<size_t>(i::MB);
- const size_t KB = static_cast<size_t>(i::KB);
const size_t pm = i::Heap::kPointerMultiplier;
const size_t hlm = i::Heap::kHeapLimitMultiplier;
ASSERT_EQ(3 * 512u * pm * KB,
@@ -38,8 +36,6 @@ TEST(Heap, YoungGenerationSizeFromOldGenerationSize) {
}
TEST(Heap, GenerationSizesFromHeapSize) {
- const size_t MB = static_cast<size_t>(i::MB);
- const size_t KB = static_cast<size_t>(i::KB);
const size_t pm = i::Heap::kPointerMultiplier;
const size_t hlm = i::Heap::kHeapLimitMultiplier;
size_t old, young;
@@ -50,7 +46,7 @@ TEST(Heap, GenerationSizesFromHeapSize) {
i::Heap::GenerationSizesFromHeapSize(1 * KB + 3 * 512u * pm * KB, &young,
&old);
- ASSERT_EQ(1 * KB, old);
+ ASSERT_EQ(1u * KB, old);
ASSERT_EQ(3 * 512u * pm * KB, young);
i::Heap::GenerationSizesFromHeapSize(128 * hlm * MB + 3 * 512 * pm * KB,
@@ -75,7 +71,6 @@ TEST(Heap, GenerationSizesFromHeapSize) {
}
TEST(Heap, HeapSizeFromPhysicalMemory) {
- const size_t MB = static_cast<size_t>(i::MB);
const size_t pm = i::Heap::kPointerMultiplier;
const size_t hlm = i::Heap::kHeapLimitMultiplier;
diff --git a/deps/v8/test/unittests/heap/local-heap-unittest.cc b/deps/v8/test/unittests/heap/local-heap-unittest.cc
index 92b5eef8dc..a009d9433c 100644
--- a/deps/v8/test/unittests/heap/local-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/local-heap-unittest.cc
@@ -132,8 +132,8 @@ class BackgroundThreadForGCEpilogue final : public v8::base::Thread {
unparked_scope.emplace(&lh);
}
{
- base::Optional<UnparkedScope> unparked_scope;
- if (parked_) unparked_scope.emplace(&lh);
+ base::Optional<UnparkedScope> nested_unparked_scope;
+ if (parked_) nested_unparked_scope.emplace(&lh);
lh.AddGCEpilogueCallback(&GCEpilogue::Callback, epilogue_);
}
epilogue_->NotifyStarted();
@@ -141,8 +141,8 @@ class BackgroundThreadForGCEpilogue final : public v8::base::Thread {
lh.Safepoint();
}
{
- base::Optional<UnparkedScope> unparked_scope;
- if (parked_) unparked_scope.emplace(&lh);
+ base::Optional<UnparkedScope> nested_unparked_scope;
+ if (parked_) nested_unparked_scope.emplace(&lh);
lh.RemoveGCEpilogueCallback(&GCEpilogue::Callback, epilogue_);
}
}
diff --git a/deps/v8/test/unittests/heap/safepoint-unittest.cc b/deps/v8/test/unittests/heap/safepoint-unittest.cc
index 8cd21c1bed..d7bfdda2cd 100644
--- a/deps/v8/test/unittests/heap/safepoint-unittest.cc
+++ b/deps/v8/test/unittests/heap/safepoint-unittest.cc
@@ -82,7 +82,7 @@ TEST_F(SafepointTest, StopParkedThreads) {
CHECK_EQ(safepoints, kRuns);
}
-static const int kRuns = 10000;
+static const int kIterations = 10000;
class RunningThread final : public v8::base::Thread {
public:
@@ -95,7 +95,7 @@ class RunningThread final : public v8::base::Thread {
LocalHeap local_heap(heap_, ThreadKind::kBackground);
UnparkedScope unparked_scope(&local_heap);
- for (int i = 0; i < kRuns; i++) {
+ for (int i = 0; i < kIterations; i++) {
counter_->fetch_add(1);
if (i % 100 == 0) local_heap.Safepoint();
}
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 8b732f3ea2..73011ef54f 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -189,15 +189,16 @@ TEST_F(SpacesTest, FreeListManySelectFreeListCategoryType) {
}
for (size_t size : sizes) {
- FreeListCategoryType cat = free_list.SelectFreeListCategoryType(size);
- if (cat == free_list.last_category_) {
- // If cat == last_category, then we make sure that |size| indeeds fits
- // in the last category.
- EXPECT_LE(free_list.categories_min[cat], size);
+ FreeListCategoryType selected =
+ free_list.SelectFreeListCategoryType(size);
+ if (selected == free_list.last_category_) {
+      // If selected == last_category, then we make sure that |size| indeed
+ // fits in the last category.
+ EXPECT_LE(free_list.categories_min[selected], size);
} else {
- // Otherwise, size should fit in |cat|, but not in |cat+1|.
- EXPECT_LE(free_list.categories_min[cat], size);
- EXPECT_LT(size, free_list.categories_min[cat + 1]);
+ // Otherwise, size should fit in |selected|, but not in |selected+1|.
+ EXPECT_LE(free_list.categories_min[selected], size);
+ EXPECT_LT(size, free_list.categories_min[selected + 1]);
}
}
}
@@ -268,25 +269,26 @@ TEST_F(SpacesTest,
}
for (size_t size : sizes) {
- FreeListCategoryType cat =
+ FreeListCategoryType selected =
free_list.SelectFastAllocationFreeListCategoryType(size);
if (size <= FreeListManyCachedFastPath::kTinyObjectMaxSize) {
// For tiny objects, the first category of the fast path should be
// chosen.
- EXPECT_TRUE(cat == FreeListManyCachedFastPath::kFastPathFirstCategory);
+ EXPECT_TRUE(selected ==
+ FreeListManyCachedFastPath::kFastPathFirstCategory);
} else if (size >= free_list.categories_min[free_list.last_category_] -
FreeListManyCachedFastPath::kFastPathOffset) {
// For objects close to the minimum of the last category, the last
// category is chosen.
- EXPECT_EQ(cat, free_list.last_category_);
+ EXPECT_EQ(selected, free_list.last_category_);
} else {
// For other objects, the chosen category must satisfy that its minimum
// is at least |size|+1.85k.
- EXPECT_GE(free_list.categories_min[cat],
+ EXPECT_GE(free_list.categories_min[selected],
size + FreeListManyCachedFastPath::kFastPathOffset);
// And the smaller category's minimum is less than |size|+1.85k
// (otherwise it would have been chosen instead).
- EXPECT_LT(free_list.categories_min[cat - 1],
+ EXPECT_LT(free_list.categories_min[selected - 1],
size + FreeListManyCachedFastPath::kFastPathOffset);
}
}
diff --git a/deps/v8/test/unittests/heap/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
index 28edd79d40..5f9bbc3e42 100644
--- a/deps/v8/test/unittests/heap/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
@@ -147,7 +147,7 @@ class Unreferenced : public cppgc::GarbageCollected<Unreferenced> {
} // namespace
TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
- v8::HandleScope scope(v8_isolate());
+ v8::HandleScope handle_scope(v8_isolate());
v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
v8::Context::Scope context_scope(context);
auto* unreferenced = cppgc::MakeGarbageCollected<Unreferenced>(
@@ -156,7 +156,7 @@ TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
// Force safepoint to force flushing of cached allocated/freed sizes in cppgc.
cpp_heap().stats_collector()->NotifySafePointForTesting();
{
- cppgc::subtle::NoGarbageCollectionScope scope(cpp_heap());
+ cppgc::subtle::NoGarbageCollectionScope no_gc_scope(cpp_heap());
cppgc::internal::FreeUnreferencedObject(cpp_heap(), unreferenced);
// Force safepoint to make sure allocated size decrease due to freeing
// unreferenced object is reported to CppHeap. Due to
@@ -177,12 +177,12 @@ TEST_F(UnifiedHeapTest, FreeUnreferencedDuringNoGcScope) {
#if !V8_OS_FUCHSIA
TEST_F(UnifiedHeapTest, TracedReferenceRetainsFromStack) {
- v8::HandleScope scope(v8_isolate());
+ v8::HandleScope handle_scope(v8_isolate());
v8::Local<v8::Context> context = v8::Context::New(v8_isolate());
v8::Context::Scope context_scope(context);
TracedReference<v8::Object> holder;
{
- v8::HandleScope scope(v8_isolate());
+ v8::HandleScope inner_handle_scope(v8_isolate());
auto local = v8::Object::New(v8_isolate());
EXPECT_TRUE(local->IsObject());
holder.Reset(v8_isolate(), local);
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index 7a4cead569..39368398e9 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -250,17 +250,19 @@ class SequentialUnmapperTest : public TestWithIsolate {
SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
old_flag_ = i::FLAG_concurrent_sweeping;
i::FLAG_concurrent_sweeping = false;
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ // Reinitialize the process-wide pointer cage so it can pick up the
+ // TrackingPageAllocator.
+ // The pointer cage must be destroyed before the virtual memory cage.
+ IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#ifdef V8_VIRTUAL_MEMORY_CAGE
+  // Reinitialize the virtual memory cage so it uses the TrackingPageAllocator.
GetProcessWideVirtualMemoryCage()->TearDown();
constexpr bool use_guard_regions = false;
CHECK(GetProcessWideVirtualMemoryCage()->Initialize(
tracking_page_allocator_, kVirtualMemoryCageMinimumSize,
use_guard_regions));
#endif
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- // Reinitialize the process-wide pointer cage so it can pick up the
- // TrackingPageAllocator.
- IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
IsolateAllocator::InitializeOncePerProcess();
#endif
TestWithIsolate::SetUpTestCase();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 8838b0b94a..bfb4b3271e 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -713,11 +713,11 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
.JumpLoop(&loop_header, 0, 0)
.Bind(&after_loop);
for (int i = 0; i < 42; i++) {
- BytecodeLabel after_loop;
+ BytecodeLabel also_after_loop;
// Conditional jump to force the code after the JumpLoop to be live.
- builder.JumpIfNull(&after_loop)
+ builder.JumpIfNull(&also_after_loop)
.JumpLoop(&loop_header, 0, 0)
- .Bind(&after_loop);
+ .Bind(&also_after_loop);
}
// Add padding to force wide backwards jumps.
diff --git a/deps/v8/test/unittests/regexp/regexp-unittest.cc b/deps/v8/test/unittests/regexp/regexp-unittest.cc
index 32c981b269..5f06d13567 100644
--- a/deps/v8/test/unittests/regexp/regexp-unittest.cc
+++ b/deps/v8/test/unittests/regexp/regexp-unittest.cc
@@ -13,7 +13,7 @@ TEST_F(TestWithNativeContext, ConvertRegExpFlagsToString) {
Handle<JSRegExp> regexp = RunJS<JSRegExp>("regexp");
Handle<String> flags = RunJS<String>("regexp.flags");
Handle<String> converted_flags =
- JSRegExp::StringFromFlags(isolate(), regexp->GetFlags());
+ JSRegExp::StringFromFlags(isolate(), regexp->flags());
EXPECT_TRUE(String::Equals(isolate(), flags, converted_flags));
}
@@ -22,7 +22,7 @@ TEST_F(TestWithNativeContext, ConvertRegExpFlagsToStringNoFlags) {
Handle<JSRegExp> regexp = RunJS<JSRegExp>("regexp");
Handle<String> flags = RunJS<String>("regexp.flags");
Handle<String> converted_flags =
- JSRegExp::StringFromFlags(isolate(), regexp->GetFlags());
+ JSRegExp::StringFromFlags(isolate(), regexp->flags());
EXPECT_TRUE(String::Equals(isolate(), flags, converted_flags));
}
@@ -31,7 +31,7 @@ TEST_F(TestWithNativeContext, ConvertRegExpFlagsToStringAllFlags) {
Handle<JSRegExp> regexp = RunJS<JSRegExp>("regexp");
Handle<String> flags = RunJS<String>("regexp.flags");
Handle<String> converted_flags =
- JSRegExp::StringFromFlags(isolate(), regexp->GetFlags());
+ JSRegExp::StringFromFlags(isolate(), regexp->flags());
EXPECT_TRUE(String::Equals(isolate(), flags, converted_flags));
}
diff --git a/deps/v8/test/unittests/torque/ls-message-unittest.cc b/deps/v8/test/unittests/torque/ls-message-unittest.cc
index c6779f978d..063450f5fa 100644
--- a/deps/v8/test/unittests/torque/ls-message-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-message-unittest.cc
@@ -86,8 +86,11 @@ TEST(LanguageServerMessage, GotoDefinition) {
SourceId definition_id = SourceFileMap::AddSource("file://base.tq");
LanguageServerData::Scope server_data_scope;
- LanguageServerData::AddDefinition({test_id, {1, 0}, {1, 10}},
- {definition_id, {4, 1}, {4, 5}});
+ LanguageServerData::AddDefinition(
+ {test_id, LineAndColumn::WithUnknownOffset(1, 0),
+ LineAndColumn::WithUnknownOffset(1, 10)},
+ {definition_id, LineAndColumn::WithUnknownOffset(4, 1),
+ LineAndColumn::WithUnknownOffset(4, 5)});
// First, check an unknown definition. The result must be null.
GotoDefinitionRequest request;
@@ -171,8 +174,10 @@ TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
// No compilation errors but two lint warnings.
{
- SourcePosition pos1{test_id, {0, 0}, {0, 1}};
- SourcePosition pos2{test_id, {1, 0}, {1, 1}};
+ SourcePosition pos1{test_id, LineAndColumn::WithUnknownOffset(0, 0),
+ LineAndColumn::WithUnknownOffset(0, 1)};
+ SourcePosition pos2{test_id, LineAndColumn::WithUnknownOffset(1, 0),
+ LineAndColumn::WithUnknownOffset(1, 1)};
Lint("lint error 1").Position(pos1);
Lint("lint error 2").Position(pos2);
}
diff --git a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
index 28efbe0195..f937958857 100644
--- a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
@@ -43,14 +43,20 @@ TEST(LanguageServer, GotoTypeDefinition) {
// Find the definition for type 'T1' of argument 'a' on line 4.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {4, 19});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(4, 19));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 7}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(2, 5),
+ LineAndColumn::WithUnknownOffset(2, 7)}));
// Find the definition for type 'T2' of argument 'b' on line 4.
- maybe_position = LanguageServerData::FindDefinition(id, {4, 26});
+ maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(4, 26));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {3, 5}, {3, 7}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(3, 5),
+ LineAndColumn::WithUnknownOffset(3, 7)}));
}
TEST(LanguageServer, GotoTypeDefinitionExtends) {
@@ -65,9 +71,12 @@ TEST(LanguageServer, GotoTypeDefinitionExtends) {
// Find the definition for 'T1' of the extends clause on line 3.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {3, 16});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(3, 16));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 7}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(2, 5),
+ LineAndColumn::WithUnknownOffset(2, 7)}));
}
TEST(LanguageServer, GotoTypeDefinitionNoDataForFile) {
@@ -76,7 +85,8 @@ TEST(LanguageServer, GotoTypeDefinitionNoDataForFile) {
SourceId test_id = SourceFileMap::AddSource("test.tq");
// Regression test, this step should not crash.
- EXPECT_FALSE(LanguageServerData::FindDefinition(test_id, {0, 0}));
+ EXPECT_FALSE(LanguageServerData::FindDefinition(
+ test_id, LineAndColumn::WithUnknownOffset(0, 0)));
}
// TODO(almuthanna): This test was skipped because it causes a crash when it is
@@ -90,7 +100,7 @@ TEST(LanguageServer, GotoLabelDefinitionInSignature) {
"macro Foo(): never labels Fail {\n"
" goto Fail;\n"
"}\n"
- "macro Bar() labels Bailout {\n"
+ "macro Bar(): void labels Bailout {\n"
" Foo() otherwise Bailout;\n"
"}\n";
@@ -99,9 +109,12 @@ TEST(LanguageServer, GotoLabelDefinitionInSignature) {
// Find the definition for 'Bailout' of the otherwise clause on line 6.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {6, 18});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(6, 18));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {5, 19}, {5, 26}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(5, 25),
+ LineAndColumn::WithUnknownOffset(5, 32)}));
}
#endif
@@ -112,7 +125,7 @@ TEST(LanguageServer, GotoLabelDefinitionInTryBlock) {
"macro Foo(): never labels Fail {\n"
" goto Fail;\n"
"}\n"
- "macro Bar() {\n"
+ "macro Bar(): void {\n"
" try { Foo() otherwise Bailout; }\n"
" label Bailout {}\n"
"}\n";
@@ -122,9 +135,12 @@ TEST(LanguageServer, GotoLabelDefinitionInTryBlock) {
// Find the definition for 'Bailout' of the otherwise clause on line 6.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {6, 25});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(6, 25));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(7, 8),
+ LineAndColumn::WithUnknownOffset(7, 15)}));
}
// TODO(almuthanna): This test was skipped because it causes a crash when it is
@@ -143,9 +159,12 @@ TEST(LanguageServer, GotoDefinitionClassSuperType) {
// Find the definition for 'Tagged' of the 'extends' on line 3.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {3, 33});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(3, 33));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 11}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(2, 5),
+ LineAndColumn::WithUnknownOffset(2, 11)}));
}
#endif
@@ -162,16 +181,19 @@ TEST(LanguageServer, GotoLabelDefinitionInSignatureGotoStmt) {
// Find the definition for 'Fail' of the goto statement on line 3.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {3, 7});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(3, 7));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 26}, {2, 30}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(2, 26),
+ LineAndColumn::WithUnknownOffset(2, 30)}));
}
TEST(LanguageServer, GotoLabelDefinitionInTryBlockGoto) {
const std::string source =
"type void;\n"
"type never;\n"
- "macro Bar() {\n"
+ "macro Bar(): void {\n"
" try { goto Bailout; }\n"
" label Bailout {}\n"
"}\n";
@@ -181,9 +203,12 @@ TEST(LanguageServer, GotoLabelDefinitionInTryBlockGoto) {
// Find the definition for 'Bailout' of the goto statement on line 3.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {3, 13});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(3, 13));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {4, 8}, {4, 15}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(4, 8),
+ LineAndColumn::WithUnknownOffset(4, 15)}));
}
TEST(LanguageServer, GotoLabelDefinitionGotoInOtherwise) {
@@ -193,7 +218,7 @@ TEST(LanguageServer, GotoLabelDefinitionGotoInOtherwise) {
"macro Foo(): never labels Fail {\n"
" goto Fail;\n"
"}\n"
- "macro Bar() {\n"
+ "macro Bar(): void {\n"
" try { Foo() otherwise goto Bailout; }\n"
" label Bailout {}\n"
"}\n";
@@ -203,9 +228,12 @@ TEST(LanguageServer, GotoLabelDefinitionGotoInOtherwise) {
// Find the definition for 'Bailout' of the otherwise clause on line 6.
const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
- auto maybe_position = LanguageServerData::FindDefinition(id, {6, 30});
+ auto maybe_position = LanguageServerData::FindDefinition(
+ id, LineAndColumn::WithUnknownOffset(6, 30));
ASSERT_TRUE(maybe_position.has_value());
- EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
+ EXPECT_EQ(*maybe_position,
+ (SourcePosition{id, LineAndColumn::WithUnknownOffset(7, 8),
+ LineAndColumn::WithUnknownOffset(7, 15)}));
}
TEST(LanguageServer, SymbolsArePopulated) {
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index cd62f37083..480342332c 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -224,7 +224,8 @@ using SubstrWithPosition =
SubstrWithPosition SubstrTester(const std::string& message, int line, int col) {
// Change line and column from 1-based to 0-based.
return {::testing::HasSubstr(message),
- LineAndColumn{line + CountPreludeLines() - 1, col - 1}};
+ LineAndColumn::WithUnknownOffset(line + CountPreludeLines() - 1,
+ col - 1)};
}
#endif
@@ -275,7 +276,7 @@ TEST(Torque, ClassDefinition) {
@export
macro TestClassWithAllTypesLoadsAndStores(
t: TestClassWithAllTypes, r: RawPtr, v1: int8, v2: uint8, v3: int16,
- v4: uint16, v5: int32, v6: uint32, v7: intptr, v8: uintptr) {
+ v4: uint16, v5: int32, v6: uint32, v7: intptr, v8: uintptr): void {
t.a = v1;
t.b = v2;
t.c = v3;
@@ -354,7 +355,7 @@ TEST(Torque, ConditionalFields) {
TEST(Torque, ConstexprLetBindingDoesNotCrash) {
ExpectFailingCompilation(
- R"(@export macro FooBar() { let foo = 0; check(foo >= 0); })",
+ R"(@export macro FooBar(): void { let foo = 0; check(foo >= 0); })",
HasSubstr("Use 'const' instead of 'let' for variable 'foo'"));
}
@@ -365,10 +366,10 @@ TEST(Torque, FailedImplicitCastFromConstexprDoesNotCrash) {
kValue,
...
}
- macro Foo() {
+ macro Foo(): void {
Bar(SomeEnum::kValue);
}
- macro Bar<T: type>(value: T) {}
+ macro Bar<T: type>(value: T): void {}
)",
HasSubstr(
"Cannot find non-constexpr type corresponding to constexpr kValue"));
@@ -376,7 +377,7 @@ TEST(Torque, FailedImplicitCastFromConstexprDoesNotCrash) {
TEST(Torque, DoubleUnderScorePrefixIllegalForIdentifiers) {
ExpectFailingCompilation(R"(
- @export macro Foo() {
+ @export macro Foo(): void {
let __x;
}
)",
@@ -386,7 +387,7 @@ TEST(Torque, DoubleUnderScorePrefixIllegalForIdentifiers) {
TEST(Torque, UnusedLetBindingLintError) {
ExpectFailingCompilation(R"(
- @export macro Foo(y: Smi) {
+ @export macro Foo(y: Smi): void {
let x: Smi = y;
}
)",
@@ -395,7 +396,7 @@ TEST(Torque, UnusedLetBindingLintError) {
TEST(Torque, UnderscorePrefixSilencesUnusedWarning) {
ExpectSuccessfulCompilation(R"(
- @export macro Foo(y: Smi) {
+ @export macro Foo(y: Smi): void {
let _x: Smi = y;
}
)");
@@ -407,7 +408,7 @@ TEST(Torque, UnderscorePrefixSilencesUnusedWarning) {
#if !defined(V8_TARGET_OS_FUCHSIA)
TEST(Torque, UsingUnderscorePrefixedIdentifierError) {
ExpectFailingCompilation(R"(
- @export macro Foo(y: Smi) {
+ @export macro Foo(y: Smi): void {
let _x: Smi = y;
check(_x == y);
}
@@ -418,40 +419,40 @@ TEST(Torque, UsingUnderscorePrefixedIdentifierError) {
TEST(Torque, UnusedArgumentLintError) {
ExpectFailingCompilation(R"(
- @export macro Foo(x: Smi) {}
+ @export macro Foo(x: Smi): void {}
)",
HasSubstr("Variable 'x' is never used."));
}
TEST(Torque, UsingUnderscorePrefixedArgumentSilencesWarning) {
ExpectSuccessfulCompilation(R"(
- @export macro Foo(_y: Smi) {}
+ @export macro Foo(_y: Smi): void {}
)");
}
TEST(Torque, UnusedLabelLintError) {
ExpectFailingCompilation(R"(
- @export macro Foo() labels Bar {}
+ @export macro Foo(): void labels Bar {}
)",
HasSubstr("Label 'Bar' is never used."));
}
TEST(Torque, UsingUnderScorePrefixLabelSilencesWarning) {
ExpectSuccessfulCompilation(R"(
- @export macro Foo() labels _Bar {}
+ @export macro Foo(): void labels _Bar {}
)");
}
TEST(Torque, NoUnusedWarningForImplicitArguments) {
ExpectSuccessfulCompilation(R"(
- @export macro Foo(implicit c: Context, r: JSReceiver)() {}
+ @export macro Foo(implicit c: Context, r: JSReceiver)(): void {}
)");
}
-TEST(Torque, NoUnusedWarningForVariablesOnlyUsedInAsserts) {
+TEST(Torque, NoUnusedWarningForVariablesOnlyUsedInDchecks) {
ExpectSuccessfulCompilation(R"(
- @export macro Foo(x: bool) {
- assert(x);
+ @export macro Foo(x: bool): void {
+ dcheck(x);
}
)");
}
@@ -492,12 +493,12 @@ TEST(Torque, LetShouldBeConstIsSkippedForStructs) {
TEST(Torque, GenericAbstractType) {
ExpectSuccessfulCompilation(R"(
type Foo<T: type> extends HeapObject;
- extern macro F1(HeapObject);
- macro F2<T: type>(x: Foo<T>) {
+ extern macro F1(HeapObject): void;
+ macro F2<T: type>(x: Foo<T>): void {
F1(x);
}
@export
- macro F3(a: Foo<Smi>, b: Foo<HeapObject>){
+ macro F3(a: Foo<Smi>, b: Foo<HeapObject>): void {
F2(a);
F2(b);
}
@@ -505,18 +506,18 @@ TEST(Torque, GenericAbstractType) {
ExpectFailingCompilation(R"(
type Foo<T: type> extends HeapObject;
- macro F1<T: type>(x: Foo<T>) {}
+ macro F1<T: type>(x: Foo<T>): void {}
@export
- macro F2(a: Foo<Smi>) {
+ macro F2(a: Foo<Smi>): void {
F1<HeapObject>(a);
})",
HasSubstr("cannot find suitable callable"));
ExpectFailingCompilation(R"(
type Foo<T: type> extends HeapObject;
- extern macro F1(Foo<HeapObject>);
+ extern macro F1(Foo<HeapObject>): void;
@export
- macro F2(a: Foo<Smi>) {
+ macro F2(a: Foo<Smi>): void {
F1(a);
})",
HasSubstr("cannot find suitable callable"));
@@ -525,14 +526,14 @@ TEST(Torque, GenericAbstractType) {
TEST(Torque, SpecializationRequesters) {
ExpectFailingCompilation(
R"(
- macro A<T: type extends HeapObject>() {}
- macro B<T: type>() {
+ macro A<T: type extends HeapObject>(): void {}
+ macro B<T: type>(): void {
A<T>();
}
- macro C<T: type>() {
+ macro C<T: type>(): void {
B<T>();
}
- macro D() {
+ macro D(): void {
C<Smi>();
}
)",
@@ -585,16 +586,16 @@ TEST(Torque, SpecializationRequesters) {
ExpectFailingCompilation(
R"(
- macro A<T: type extends HeapObject>() {}
- macro B<T: type>() {
+ macro A<T: type extends HeapObject>(): void {}
+ macro B<T: type>(): void {
A<T>();
}
struct C<T: type> {
- macro Method() {
+ macro Method(): void {
B<T>();
}
}
- macro D(_b: C<Smi>) {}
+ macro D(_b: C<Smi>): void {}
)",
SubstrVector{
SubstrTester("cannot find suitable callable", 4, 7),
@@ -690,7 +691,7 @@ TEST(Torque, EnumTypeAnnotations) {
kValue2: Type2,
kValue3
}
- @export macro Foo() {
+ @export macro Foo(): void {
const _a: Type1 = MyEnum::kValue1;
const _b: Type2 = MyEnum::kValue2;
const _c: intptr = MyEnum::kValue3;
@@ -706,7 +707,7 @@ TEST(Torque, ConstClassFields) {
}
@export
- macro Test(implicit context: Context)(o: Foo, n: int32) {
+ macro Test(implicit context: Context)(o: Foo, n: int32): void {
const _x: int32 = o.x;
o.y = n;
}
@@ -718,7 +719,7 @@ TEST(Torque, ConstClassFields) {
}
@export
- macro Test(implicit context: Context)(o: Foo, n: int32) {
+ macro Test(implicit context: Context)(o: Foo, n: int32): void {
o.x = n;
}
)",
@@ -734,7 +735,7 @@ TEST(Torque, ConstClassFields) {
}
@export
- macro Test(implicit context: Context)(o: Foo, n: int32) {
+ macro Test(implicit context: Context)(o: Foo, n: int32): void {
const _x: int32 = o.s.x;
// Assigning a struct as a value is OK, even when the struct contains
// const fields.
@@ -753,7 +754,7 @@ TEST(Torque, ConstClassFields) {
}
@export
- macro Test(implicit context: Context)(o: Foo, n: int32) {
+ macro Test(implicit context: Context)(o: Foo, n: int32): void {
o.s.y = n;
}
)",
@@ -769,7 +770,7 @@ TEST(Torque, ConstClassFields) {
}
@export
- macro Test(implicit context: Context)(o: Foo, n: int32) {
+ macro Test(implicit context: Context)(o: Foo, n: int32): void {
o.s.x = n;
}
)",
@@ -784,7 +785,7 @@ TEST(Torque, References) {
}
@export
- macro Test(implicit context: Context)(o: Foo, n: int32) {
+ macro Test(implicit context: Context)(o: Foo, n: int32): void {
const constRefX: const &int32 = &o.x;
const refY: &int32 = &o.y;
const constRefY: const &int32 = refY;
@@ -804,7 +805,7 @@ TEST(Torque, References) {
}
@export
- macro Test(implicit context: Context)(o: Foo) {
+ macro Test(implicit context: Context)(o: Foo): void {
const _refX: &int32 = &o.x;
}
)",
@@ -818,7 +819,7 @@ TEST(Torque, References) {
}
@export
- macro Test(implicit context: Context)(o: Foo, n: int32) {
+ macro Test(implicit context: Context)(o: Foo, n: int32): void {
const constRefX: const &int32 = &o.x;
*constRefX = n;
}
@@ -830,7 +831,7 @@ TEST(Torque, CatchFirstHandler) {
ExpectFailingCompilation(
R"(
@export
- macro Test() {
+ macro Test(): void {
try {
} label Foo {
} catch (e) {}
@@ -875,14 +876,14 @@ TEST(Torque, UnusedImplicit) {
@export
macro Test1(implicit c: Smi)(a: Object): Object { return a; }
@export
- macro Test2(b: Object) { Test1(b); }
+ macro Test2(b: Object): void { Test1(b); }
)");
ExpectFailingCompilation(
R"(
macro Test1(implicit c: Smi)(_a: Object): Smi { return c; }
@export
- macro Test2(b: Smi) { Test1(b); }
+ macro Test2(b: Smi): void { Test1(b); }
)",
HasSubstr("undefined expression of type Smi: the implicit "
"parameter 'c' is not defined when invoking Test1 at"));
@@ -891,7 +892,7 @@ TEST(Torque, UnusedImplicit) {
R"(
extern macro Test3(implicit c: Smi)(Object): Smi;
@export
- macro Test4(b: Smi) { Test3(b); }
+ macro Test4(b: Smi): void { Test3(b); }
)",
HasSubstr("unititialized implicit parameters can only be passed to "
"Torque-defined macros: the implicit parameter 'c' is not "
@@ -901,7 +902,7 @@ TEST(Torque, UnusedImplicit) {
macro Test7<T: type>(implicit c: Smi)(o: T): Smi;
Test7<Smi>(implicit c: Smi)(o: Smi): Smi { return o; }
@export
- macro Test8(b: Smi) { Test7(b); }
+ macro Test8(b: Smi): void { Test7(b); }
)");
ExpectFailingCompilation(
@@ -913,7 +914,7 @@ TEST(Torque, UnusedImplicit) {
macro Test7<T: type>(o: T): Smi;
Test7<Smi>(o: Smi): Smi { return Test6<Smi>(o); }
@export
- macro Test8(b: Smi) { Test7(b); }
+ macro Test8(b: Smi): void { Test7(b); }
)",
HasSubstr("\nambiguous callable : \n Test6(Smi)\ncandidates are:\n "
"Test6(Smi): Smi\n Test6(implicit Smi)(Smi): Smi"));
@@ -921,27 +922,27 @@ TEST(Torque, UnusedImplicit) {
TEST(Torque, ImplicitTemplateParameterInference) {
ExpectSuccessfulCompilation(R"(
- macro Foo(_x: Map) {}
- macro Foo(_x: Smi) {}
- macro GenericMacro<T: type>(implicit x: T)() {
+ macro Foo(_x: Map): void {}
+ macro Foo(_x: Smi): void {}
+ macro GenericMacro<T: type>(implicit x: T)(): void {
Foo(x);
}
@export
- macro Test1(implicit x: Smi)() { GenericMacro(); }
+ macro Test1(implicit x: Smi)(): void { GenericMacro(); }
@export
- macro Test2(implicit x: Map)() { GenericMacro(); }
+ macro Test2(implicit x: Map)(): void { GenericMacro(); }
)");
ExpectFailingCompilation(
R"(
// Wrap in namespace to avoid redeclaration error.
namespace foo {
- macro Foo(implicit x: Map)() {}
+ macro Foo(implicit x: Map)(): void {}
}
- macro Foo(implicit x: Smi)() {}
+ macro Foo(implicit x: Smi)(): void {}
namespace foo{
@export
- macro Test(implicit x: Smi)() { Foo(); }
+ macro Test(implicit x: Smi)(): void { Foo(); }
}
)",
HasSubstr("ambiguous callable"));
@@ -950,12 +951,12 @@ TEST(Torque, ImplicitTemplateParameterInference) {
R"(
// Wrap in namespace to avoid redeclaration error.
namespace foo {
- macro Foo(implicit x: Map)() {}
+ macro Foo(implicit x: Map)(): void {}
}
- macro Foo(implicit x: Smi)() {}
+ macro Foo(implicit x: Smi)(): void {}
namespace foo{
@export
- macro Test(implicit x: Map)() { Foo(); }
+ macro Test(implicit x: Map)(): void { Foo(); }
}
)",
HasSubstr("ambiguous callable"));
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 393256b0a4..d649e36148 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -87,7 +87,7 @@ class TestModuleBuilder {
return static_cast<byte>(mod.globals.size() - 1);
}
byte AddSignature(const FunctionSig* sig) {
- mod.add_signature(sig);
+ mod.add_signature(sig, kNoSuperType);
CHECK_LE(mod.types.size(), kMaxByteSizedLeb128);
return static_cast<byte>(mod.types.size() - 1);
}
@@ -127,19 +127,20 @@ class TestModuleBuilder {
return static_cast<byte>(mod.tables.size() - 1);
}
- byte AddStruct(std::initializer_list<F> fields) {
+ byte AddStruct(std::initializer_list<F> fields,
+ uint32_t supertype = kNoSuperType) {
StructType::Builder type_builder(mod.signature_zone.get(),
static_cast<uint32_t>(fields.size()));
for (F field : fields) {
type_builder.AddField(field.first, field.second);
}
- mod.add_struct_type(type_builder.Build());
+ mod.add_struct_type(type_builder.Build(), supertype);
return static_cast<byte>(mod.type_kinds.size() - 1);
}
byte AddArray(ValueType type, bool mutability) {
ArrayType* array = mod.signature_zone->New<ArrayType>(type, mutability);
- mod.add_array_type(array);
+ mod.add_array_type(array, kNoSuperType);
return static_cast<byte>(mod.type_kinds.size() - 1);
}
@@ -705,21 +706,33 @@ TEST_F(FunctionBodyDecoderTest, BlockType) {
}
TEST_F(FunctionBodyDecoderTest, BlockType_fail) {
- ExpectFailure(sigs.i_i(), {WASM_BLOCK_L(WASM_I64V_1(0))});
- ExpectFailure(sigs.i_i(), {WASM_BLOCK_F(WASM_F32(0.0))});
- ExpectFailure(sigs.i_i(), {WASM_BLOCK_D(WASM_F64(1.1))});
-
- ExpectFailure(sigs.l_l(), {WASM_BLOCK_I(WASM_ZERO)});
- ExpectFailure(sigs.l_l(), {WASM_BLOCK_F(WASM_F32(0.0))});
- ExpectFailure(sigs.l_l(), {WASM_BLOCK_D(WASM_F64(1.1))});
-
- ExpectFailure(sigs.f_ff(), {WASM_BLOCK_I(WASM_ZERO)});
- ExpectFailure(sigs.f_ff(), {WASM_BLOCK_L(WASM_I64V_1(0))});
- ExpectFailure(sigs.f_ff(), {WASM_BLOCK_D(WASM_F64(1.1))});
-
- ExpectFailure(sigs.d_dd(), {WASM_BLOCK_I(WASM_ZERO)});
- ExpectFailure(sigs.d_dd(), {WASM_BLOCK_L(WASM_I64V_1(0))});
- ExpectFailure(sigs.d_dd(), {WASM_BLOCK_F(WASM_F32(0.0))});
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_L(WASM_I64V_1(0))}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_F(WASM_F32(0.0))}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.i_i(), {WASM_BLOCK_D(WASM_F64(1.1))}, kAppendEnd,
+ "type error in fallthru[0]");
+
+ ExpectFailure(sigs.l_l(), {WASM_BLOCK_I(WASM_ZERO)}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.l_l(), {WASM_BLOCK_F(WASM_F32(0.0))}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.l_l(), {WASM_BLOCK_D(WASM_F64(1.1))}, kAppendEnd,
+ "type error in fallthru[0]");
+
+ ExpectFailure(sigs.f_ff(), {WASM_BLOCK_I(WASM_ZERO)}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.f_ff(), {WASM_BLOCK_L(WASM_I64V_1(0))}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.f_ff(), {WASM_BLOCK_D(WASM_F64(1.1))}, kAppendEnd,
+ "type error in fallthru[0]");
+
+ ExpectFailure(sigs.d_dd(), {WASM_BLOCK_I(WASM_ZERO)}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.d_dd(), {WASM_BLOCK_L(WASM_I64V_1(0))}, kAppendEnd,
+ "type error in fallthru[0]");
+ ExpectFailure(sigs.d_dd(), {WASM_BLOCK_F(WASM_F32(0.0))}, kAppendEnd,
+ "type error in fallthru[0]");
}
TEST_F(FunctionBodyDecoderTest, BlockF32) {
@@ -752,7 +765,7 @@ TEST_F(FunctionBodyDecoderTest, Block3_continue) {
}
TEST_F(FunctionBodyDecoderTest, NestedBlock_return) {
- ExpectValidates(sigs.i_i(), {B1(B1(WASM_RETURN1(WASM_ZERO))), WASM_ZERO});
+ ExpectValidates(sigs.i_i(), {B1(B1(WASM_RETURN(WASM_ZERO))), WASM_ZERO});
}
TEST_F(FunctionBodyDecoderTest, BlockBrBinop) {
@@ -1123,11 +1136,12 @@ TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) {
WASM_GC_OP(kExprStructNewWithRtt), struct_index,
kExprCallFunction, struct_consumer});
ExpectValidates(sigs.v_v(),
- {WASM_UNREACHABLE, WASM_GC_OP(kExprStructNewDefault),
+ {WASM_UNREACHABLE, WASM_GC_OP(kExprStructNewDefaultWithRtt),
struct_index, kExprDrop});
- ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_RTT_CANON(struct_index),
- WASM_GC_OP(kExprStructNewDefault), struct_index,
- kExprCallFunction, struct_consumer});
+ ExpectValidates(sigs.v_v(),
+ {WASM_UNREACHABLE, WASM_RTT_CANON(struct_index),
+ WASM_GC_OP(kExprStructNewDefaultWithRtt), struct_index,
+ kExprCallFunction, struct_consumer});
ExpectValidates(sigs.v_v(),
{WASM_UNREACHABLE, WASM_GC_OP(kExprArrayNewWithRtt),
@@ -1139,11 +1153,11 @@ TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) {
{WASM_UNREACHABLE, WASM_I32V(42), WASM_RTT_CANON(array_index),
WASM_GC_OP(kExprArrayNewWithRtt), array_index, kExprDrop});
ExpectValidates(sigs.v_v(),
- {WASM_UNREACHABLE, WASM_GC_OP(kExprArrayNewDefault),
+ {WASM_UNREACHABLE, WASM_GC_OP(kExprArrayNewDefaultWithRtt),
array_index, kExprDrop});
- ExpectValidates(sigs.v_v(),
- {WASM_UNREACHABLE, WASM_RTT_CANON(array_index),
- WASM_GC_OP(kExprArrayNewDefault), array_index, kExprDrop});
+ ExpectValidates(sigs.v_v(), {WASM_UNREACHABLE, WASM_RTT_CANON(array_index),
+ WASM_GC_OP(kExprArrayNewDefaultWithRtt),
+ array_index, kExprDrop});
ExpectValidates(sigs.i_v(), {WASM_UNREACHABLE, WASM_GC_OP(kExprRefTest),
struct_index, struct_index});
@@ -1367,13 +1381,13 @@ TEST_F(FunctionBodyDecoderTest, MultipleReturn) {
static ValueType kIntTypes5[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32,
kWasmI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- ExpectValidates(&sig_ii_v, {WASM_RETURNN(2, WASM_ZERO, WASM_ONE)});
- ExpectFailure(&sig_ii_v, {WASM_RETURNN(1, WASM_ZERO)});
+ ExpectValidates(&sig_ii_v, {WASM_RETURN(WASM_ZERO, WASM_ONE)});
+ ExpectFailure(&sig_ii_v, {WASM_RETURN(WASM_ZERO)});
FunctionSig sig_iii_v(3, 0, kIntTypes5);
ExpectValidates(&sig_iii_v,
- {WASM_RETURNN(3, WASM_ZERO, WASM_ONE, WASM_I32V_1(44))});
- ExpectFailure(&sig_iii_v, {WASM_RETURNN(2, WASM_ZERO, WASM_ONE)});
+ {WASM_RETURN(WASM_ZERO, WASM_ONE, WASM_I32V_1(44))});
+ ExpectFailure(&sig_iii_v, {WASM_RETURN(WASM_ZERO, WASM_ONE)});
}
TEST_F(FunctionBodyDecoderTest, MultipleReturn_fallthru) {
@@ -1711,6 +1725,16 @@ TEST_F(FunctionBodyDecoderTest, ReturnCallsWithTooFewArguments) {
ExpectFailure(sig, {WASM_RETURN_CALL_FUNCTION(2, WASM_LOCAL_GET(0))});
}
+TEST_F(FunctionBodyDecoderTest, ReturnCallWithSubtype) {
+ WASM_FEATURE_SCOPE(return_call);
+
+ auto sig = MakeSig::Returns(kWasmExternRef);
+ auto callee_sig = MakeSig::Returns(kWasmExternNonNullableRef);
+ builder.AddFunction(&callee_sig);
+
+ ExpectValidates(&sig, {WASM_RETURN_CALL_FUNCTION0(0)});
+}
+
TEST_F(FunctionBodyDecoderTest, ReturnCallsWithMismatchedSigs) {
WASM_FEATURE_SCOPE(return_call);
@@ -1966,10 +1990,10 @@ TEST_F(FunctionBodyDecoderTest, TablesWithFunctionSubtyping) {
// that is a subtype of the table type.
ExpectValidates(
FunctionSig::Build(zone(), {ValueType::Ref(sub_struct, kNullable)}, {}),
- {WASM_CALL_INDIRECT_TABLE(
- table, function_type,
- WASM_STRUCT_NEW_DEFAULT(super_struct, WASM_RTT_CANON(super_struct)),
- WASM_ZERO)});
+ {WASM_CALL_INDIRECT_TABLE(table, function_type,
+ WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ super_struct, WASM_RTT_CANON(super_struct)),
+ WASM_ZERO)});
// table.set's subtyping works as expected.
ExpectValidates(sigs.v_i(), {WASM_TABLE_SET(0, WASM_LOCAL_GET(0),
@@ -2678,13 +2702,13 @@ TEST_F(FunctionBodyDecoderTest, BrTableSubtyping) {
{F(kWasmI8, true), F(kWasmI16, false), F(kWasmI32, true)});
ExpectValidates(
sigs.v_v(),
- {WASM_BLOCK_R(
- wasm::ValueType::Ref(supertype1, kNonNullable),
- WASM_BLOCK_R(
- wasm::ValueType::Ref(supertype2, kNonNullable),
- WASM_STRUCT_NEW_DEFAULT(subtype, WASM_RTT_CANON(subtype)),
- WASM_BR_TABLE(WASM_I32V(5), 1, BR_TARGET(0), BR_TARGET(1))),
- WASM_UNREACHABLE),
+ {WASM_BLOCK_R(wasm::ValueType::Ref(supertype1, kNonNullable),
+ WASM_BLOCK_R(wasm::ValueType::Ref(supertype2, kNonNullable),
+ WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ subtype, WASM_RTT_CANON(subtype)),
+ WASM_BR_TABLE(WASM_I32V(5), 1, BR_TARGET(0),
+ BR_TARGET(1))),
+ WASM_UNREACHABLE),
WASM_DROP});
}
@@ -3021,16 +3045,29 @@ TEST_F(FunctionBodyDecoderTest, MultiValBlock1) {
ExpectValidates(
sigs.i_ii(),
{WASM_BLOCK_X(sig0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), kExprI32Add});
- ExpectFailure(sigs.i_ii(), {WASM_BLOCK_X(sig0, WASM_NOP), kExprI32Add});
- ExpectFailure(sigs.i_ii(),
- {WASM_BLOCK_X(sig0, WASM_LOCAL_GET(0)), kExprI32Add});
+ ExpectFailure(sigs.i_ii(), {WASM_BLOCK_X(sig0, WASM_NOP), kExprI32Add},
+ kAppendEnd,
+ "expected 2 elements on the stack for fallthru, found 0");
+ ExpectFailure(
+ sigs.i_ii(), {WASM_BLOCK_X(sig0, WASM_LOCAL_GET(0)), kExprI32Add},
+ kAppendEnd, "expected 2 elements on the stack for fallthru, found 1");
ExpectFailure(sigs.i_ii(),
{WASM_BLOCK_X(sig0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
WASM_LOCAL_GET(0)),
- kExprI32Add});
+ kExprI32Add},
+ kAppendEnd,
+ "expected 2 elements on the stack for fallthru, found 3");
ExpectFailure(
sigs.i_ii(),
- {WASM_BLOCK_X(sig0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), kExprF32Add});
+ {WASM_BLOCK_X(sig0, WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), kExprF32Add},
+ kAppendEnd, "f32.add[1] expected type f32, found block of type i32");
+
+ byte sig1 = builder.AddSignature(sigs.v_i());
+ ExpectFailure(
+ sigs.v_i(),
+ {WASM_LOCAL_GET(0), WASM_BLOCK(WASM_BLOCK_X(sig1, WASM_UNREACHABLE))},
+ kAppendEnd,
+ "not enough arguments on the stack for block (need 1, got 0)");
}
TEST_F(FunctionBodyDecoderTest, MultiValBlock2) {
@@ -3148,17 +3185,17 @@ TEST_F(FunctionBodyDecoderTest, BlockParam) {
ExpectValidates(sigs.i_ii(), {WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
WASM_BLOCK_X(sig1, WASM_NOP),
WASM_I32_ADD(WASM_NOP, WASM_NOP)});
- ExpectFailure(sigs.i_ii(), {WASM_BLOCK_X(sig1, WASM_NOP),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ ExpectFailure(sigs.i_ii(),
+ {WASM_BLOCK_X(sig1, WASM_NOP), WASM_RETURN(WASM_LOCAL_GET(0))});
ExpectFailure(sigs.i_ii(), {WASM_BLOCK_X(sig1, WASM_LOCAL_GET(0)),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
ExpectFailure(
sigs.i_ii(),
{WASM_LOCAL_GET(0), WASM_BLOCK_X(sig2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
ExpectFailure(sigs.i_ii(),
{WASM_LOCAL_GET(0), WASM_BLOCK_X(sig1, WASM_F32_NEG(WASM_NOP)),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
}
TEST_F(FunctionBodyDecoderTest, LoopParam) {
@@ -3174,16 +3211,16 @@ TEST_F(FunctionBodyDecoderTest, LoopParam) {
WASM_LOOP_X(sig1, WASM_NOP),
WASM_I32_ADD(WASM_NOP, WASM_NOP)});
ExpectFailure(sigs.i_ii(),
- {WASM_LOOP_X(sig1, WASM_NOP), WASM_RETURN1(WASM_LOCAL_GET(0))});
+ {WASM_LOOP_X(sig1, WASM_NOP), WASM_RETURN(WASM_LOCAL_GET(0))});
ExpectFailure(sigs.i_ii(), {WASM_LOOP_X(sig1, WASM_LOCAL_GET(0)),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
ExpectFailure(
sigs.i_ii(),
{WASM_LOCAL_GET(0), WASM_LOOP_X(sig2, WASM_I32_ADD(WASM_NOP, WASM_NOP)),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
ExpectFailure(sigs.i_ii(),
{WASM_LOCAL_GET(0), WASM_LOOP_X(sig1, WASM_F32_NEG(WASM_NOP)),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
}
TEST_F(FunctionBodyDecoderTest, LoopParamBr) {
@@ -3201,10 +3238,10 @@ TEST_F(FunctionBodyDecoderTest, LoopParamBr) {
{WASM_LOCAL_GET(0), WASM_LOOP_X(sig1, WASM_BLOCK_X(sig1, WASM_BR(1)))});
ExpectFailure(sigs.i_ii(),
{WASM_LOCAL_GET(0), WASM_LOOP_X(sig1, WASM_BLOCK(WASM_BR(1))),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
ExpectFailure(sigs.i_ii(), {WASM_LOCAL_GET(0), WASM_LOCAL_GET(1),
WASM_LOOP_X(sig2, WASM_BLOCK_X(sig1, WASM_BR(1))),
- WASM_RETURN1(WASM_LOCAL_GET(0))});
+ WASM_RETURN(WASM_LOCAL_GET(0))});
}
TEST_F(FunctionBodyDecoderTest, IfParam) {
@@ -3620,7 +3657,7 @@ ValueType optref(byte type_index) {
return ValueType::Ref(type_index, kNullable);
}
-TEST_F(FunctionBodyDecoderTest, StructNewDefault) {
+TEST_F(FunctionBodyDecoderTest, StructNewDefaultWithRtt) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
@@ -3629,12 +3666,12 @@ TEST_F(FunctionBodyDecoderTest, StructNewDefault) {
byte type_index = builder.AddStruct({F(kWasmI32, true)});
byte bad_type_index = builder.AddStruct({F(ref(type_index), true)});
module = builder.module();
- ExpectValidates(sigs.v_v(), {WASM_STRUCT_NEW_DEFAULT(
+ ExpectValidates(sigs.v_v(), {WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
type_index, WASM_RTT_CANON(type_index)),
WASM_DROP});
ExpectFailure(sigs.v_v(),
- {WASM_STRUCT_NEW_DEFAULT(bad_type_index,
- WASM_RTT_CANON(bad_type_index)),
+ {WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ bad_type_index, WASM_RTT_CANON(bad_type_index)),
WASM_DROP});
}
{
@@ -3643,16 +3680,43 @@ TEST_F(FunctionBodyDecoderTest, StructNewDefault) {
byte bad_type_index = builder.AddArray(ref(type_index), true);
module = builder.module();
ExpectValidates(sigs.v_v(),
- {WASM_ARRAY_NEW_DEFAULT(type_index, WASM_I32V(3),
- WASM_RTT_CANON(type_index)),
+ {WASM_ARRAY_NEW_DEFAULT_WITH_RTT(
+ type_index, WASM_I32V(3), WASM_RTT_CANON(type_index)),
WASM_DROP});
- ExpectFailure(sigs.v_v(),
- {WASM_ARRAY_NEW_DEFAULT(bad_type_index, WASM_I32V(3),
- WASM_RTT_CANON(bad_type_index)),
- WASM_DROP});
+ ExpectFailure(sigs.v_v(), {WASM_ARRAY_NEW_DEFAULT_WITH_RTT(
+ bad_type_index, WASM_I32V(3),
+ WASM_RTT_CANON(bad_type_index)),
+ WASM_DROP});
}
}
+TEST_F(FunctionBodyDecoderTest, NominalStructSubtyping) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ byte structural_type = builder.AddStruct({F(kWasmI32, true)});
+ byte nominal_type = builder.AddStruct({F(kWasmI32, true)}, kGenericSuperType);
+ AddLocals(optref(structural_type), 1);
+ AddLocals(optref(nominal_type), 1);
+ // Try to assign a nominally-typed value to a structurally-typed local.
+ ExpectFailure(sigs.v_v(),
+ {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT(nominal_type))},
+ kAppendEnd, "expected type (ref null 0)");
+ // Try to assign a structurally-typed value to a nominally-typed local.
+ ExpectFailure(sigs.v_v(),
+ {WASM_LOCAL_SET(
+ 1, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ structural_type, WASM_RTT_CANON(structural_type)))},
+ kAppendEnd, "expected type (ref null 1)");
+ // But assigning to the correctly typed local works.
+ ExpectValidates(sigs.v_v(),
+ {WASM_LOCAL_SET(1, WASM_STRUCT_NEW_DEFAULT(nominal_type))});
+ ExpectValidates(sigs.v_v(),
+ {WASM_LOCAL_SET(0, WASM_STRUCT_NEW_DEFAULT_WITH_RTT(
+ structural_type,
+ WASM_RTT_CANON(structural_type)))});
+}
+
TEST_F(FunctionBodyDecoderTest, DefaultableLocal) {
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(reftypes);
@@ -3953,8 +4017,8 @@ TEST_F(FunctionBodyDecoderTest, GCStruct) {
{WASM_STRUCT_NEW_WITH_RTT(struct_type_index,
WASM_RTT_CANON(struct_type_index))},
kAppendEnd,
- "not enough arguments on the stack for struct.new_with_rtt, "
- "expected 1 more");
+ "not enough arguments on the stack for struct.new_with_rtt "
+ "(need 2, got 1)");
// Too many arguments.
ExpectFailure(
&sig_r_v,
@@ -4088,8 +4152,8 @@ TEST_F(FunctionBodyDecoderTest, GCArray) {
{WASM_I32V(10), WASM_RTT_CANON(array_type_index),
WASM_GC_OP(kExprArrayNewWithRtt), array_type_index},
kAppendEnd,
- "not enough arguments on the stack for array.new_with_rtt, "
- "expected 1 more");
+ "not enough arguments on the stack for array.new_with_rtt "
+ "(need 3, got 2)");
// Mistyped initializer.
ExpectFailure(&sig_r_v,
{WASM_ARRAY_NEW_WITH_RTT(
@@ -4632,11 +4696,11 @@ TEST_F(FunctionBodyDecoderTest, LocalTeeTyping) {
AddLocals(ValueType::Ref(array_type, kNullable), 1);
- ExpectFailure(
- &sig,
- {WASM_LOCAL_TEE(0, WASM_ARRAY_NEW_DEFAULT(array_type, WASM_I32V(5),
- WASM_RTT_CANON(array_type)))},
- kAppendEnd, "expected (ref 0), got (ref null 0)");
+ ExpectFailure(&sig,
+ {WASM_LOCAL_TEE(0, WASM_ARRAY_NEW_DEFAULT_WITH_RTT(
+ array_type, WASM_I32V(5),
+ WASM_RTT_CANON(array_type)))},
+ kAppendEnd, "expected (ref 0), got (ref null 0)");
}
TEST_F(FunctionBodyDecoderTest, MergeNullableTypes) {
@@ -4654,7 +4718,7 @@ TEST_F(FunctionBodyDecoderTest, MergeNullableTypes) {
// Regression test for crbug.com/1234453.
ExpectValidates(sigs.v_v(),
{WASM_GC_OP(kExprRttCanon), struct_type_index,
- WASM_GC_OP(kExprStructNewDefault), struct_type_index,
+ WASM_GC_OP(kExprStructNewDefaultWithRtt), struct_type_index,
WASM_LOOP_X(loop_sig_index, kExprDrop, kExprRefNull,
struct_type_index, kExprBr, 0)});
}
diff --git a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
index 65f8aeafe1..73062c1057 100644
--- a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
+++ b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
@@ -2,6 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8config.h"
+
+// TODO(clemensb): Extend this to other OSes.
+#if V8_OS_POSIX && !V8_OS_FUCHSIA
+#include <signal.h>
+#endif // V8_OS_POSIX && !V8_OS_FUCHSIA
+
+#include "src/base/macros.h"
#include "src/flags/flags.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/module-compiler.h"
@@ -11,6 +19,7 @@
#include "src/wasm/wasm-opcodes.h"
#include "test/common/wasm/wasm-macro-gen.h"
#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock-matchers.h"
namespace v8 {
namespace internal {
@@ -44,6 +53,9 @@ class MemoryProtectionTest : public TestWithNativeContext {
FLAG_wasm_memory_protection_keys = enable_pku;
if (enable_pku) {
GetWasmCodeManager()->InitializeMemoryProtectionKeyForTesting();
+ // The key is initially write-protected.
+ CHECK_IMPLIES(GetWasmCodeManager()->HasMemoryProtectionKeySupport(),
+ !GetWasmCodeManager()->MemoryProtectionKeyWritable());
}
bool enable_mprotect =
@@ -177,6 +189,163 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterScope) {
AssertCodeEventuallyProtected();
}
+#if V8_OS_POSIX && !V8_OS_FUCHSIA
+class ParameterizedMemoryProtectionTestWithSignalHandling
+ : public MemoryProtectionTest,
+ public ::testing::WithParamInterface<
+ std::tuple<MemoryProtectionMode, bool, bool>> {
+ public:
+ class SignalHandlerScope {
+ public:
+ SignalHandlerScope() {
+ CHECK_NULL(current_handler_scope_);
+ current_handler_scope_ = this;
+ struct sigaction sa;
+ sa.sa_sigaction = &HandleSignal;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
+ CHECK_EQ(0, sigaction(SIGPROF, &sa, &old_signal_handler_));
+ }
+
+ ~SignalHandlerScope() {
+ CHECK_EQ(current_handler_scope_, this);
+ current_handler_scope_ = nullptr;
+ sigaction(SIGPROF, &old_signal_handler_, nullptr);
+ }
+
+ void SetAddressToWriteToOnSignal(uint8_t* address) {
+ CHECK_NULL(code_address_);
+ CHECK_NOT_NULL(address);
+ code_address_ = address;
+ }
+
+ int num_handled_signals() const { return handled_signals_; }
+
+ private:
+ static void HandleSignal(int signal, siginfo_t*, void*) {
+ // We execute on POSIX only, so we just directly use {printf} and friends.
+ if (signal == SIGPROF) {
+ printf("Handled SIGPROF.\n");
+ } else {
+ printf("Handled unknown signal: %d.\n", signal);
+ }
+ CHECK_NOT_NULL(current_handler_scope_);
+ current_handler_scope_->handled_signals_ += 1;
+ if (uint8_t* write_address = current_handler_scope_->code_address_) {
+ // Print to the error output such that we can check against this message
+ // in the ASSERT_DEATH below.
+ fprintf(stderr, "Writing to code.\n");
+ // This write will crash if code is protected.
+ *write_address = 0;
+ fprintf(stderr, "Successfully wrote to code.\n");
+ }
+ }
+
+ struct sigaction old_signal_handler_;
+ int handled_signals_ = 0;
+ uint8_t* code_address_ = nullptr;
+
+ // These are accessed from the signal handler.
+ static SignalHandlerScope* current_handler_scope_;
+ };
+
+ void SetUp() override { Initialize(std::get<0>(GetParam())); }
+};
+
+// static
+ParameterizedMemoryProtectionTestWithSignalHandling::SignalHandlerScope*
+ ParameterizedMemoryProtectionTestWithSignalHandling::SignalHandlerScope::
+ current_handler_scope_ = nullptr;
+
+std::string PrintMemoryProtectionAndSignalHandlingTestParam(
+ ::testing::TestParamInfo<std::tuple<MemoryProtectionMode, bool, bool>>
+ info) {
+ MemoryProtectionMode protection_mode = std::get<0>(info.param);
+ const bool write_in_signal_handler = std::get<1>(info.param);
+ const bool open_write_scope = std::get<2>(info.param);
+ return std::string{MemoryProtectionModeToString(protection_mode)} + "_" +
+ (write_in_signal_handler ? "Write" : "NoWrite") + "_" +
+ (open_write_scope ? "WithScope" : "NoScope");
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ MemoryProtection, ParameterizedMemoryProtectionTestWithSignalHandling,
+ ::testing::Combine(::testing::Values(kNoProtection, kPku, kMprotect,
+ kPkuWithMprotectFallback),
+ ::testing::Bool(), ::testing::Bool()),
+ PrintMemoryProtectionAndSignalHandlingTestParam);
+
+TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) {
+ // We must run in the "threadsafe" mode in order to make the spawned process
+ // for the death test(s) re-execute the whole unit test up to the point of the
+ // death test. Otherwise we would not really test the signal handling setup
+ // that we use in the wild.
+ // (see https://google.github.io/googletest/reference/assertions.html)
+ CHECK_EQ("threadsafe", ::testing::GTEST_FLAG(death_test_style));
+
+ const bool write_in_signal_handler = std::get<1>(GetParam());
+ const bool open_write_scope = std::get<2>(GetParam());
+ CompileModule();
+ SignalHandlerScope signal_handler_scope;
+
+ CHECK_EQ(0, signal_handler_scope.num_handled_signals());
+ pthread_kill(pthread_self(), SIGPROF);
+ CHECK_EQ(1, signal_handler_scope.num_handled_signals());
+
+ uint8_t* code_start_ptr = &code()->instructions()[0];
+ uint8_t code_start = *code_start_ptr;
+ CHECK_NE(0, code_start);
+ if (write_in_signal_handler) {
+ signal_handler_scope.SetAddressToWriteToOnSignal(code_start_ptr);
+ }
+
+ // If the signal handler writes to protected code we expect a crash.
+ // An exception is M1, where an open scope still has an effect in the signal
+ // handler.
+ bool expect_crash = write_in_signal_handler && code_is_protected() &&
+ (!V8_HAS_PTHREAD_JIT_WRITE_PROTECT || !open_write_scope);
+ if (expect_crash) {
+ // Avoid {ASSERT_DEATH_IF_SUPPORTED}, because it only accepts a regex as the
+ // second parameter, not a matcher like {ASSERT_DEATH}.
+#if GTEST_HAS_DEATH_TEST
+ ASSERT_DEATH(
+ // The signal handler should crash, but it might "accidentally"
+ // succeed if tier-up is running in the background and using mprotect
+ // to unprotect the code for the whole process. In that case we
+ // repeatedly send the signal until we crash.
+ do {
+ base::Optional<CodeSpaceWriteScope> write_scope;
+ if (open_write_scope) write_scope.emplace(native_module());
+ pthread_kill(pthread_self(), SIGPROF);
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
+ } while (uses_mprotect()), // Only loop for mprotect.
+ // Check that the subprocess tried to write, but did not succeed.
+ ::testing::AnyOf(
+ // non-sanitizer builds:
+ ::testing::EndsWith("Writing to code.\n"),
+ // ASan:
+ ::testing::HasSubstr("Writing to code.\n"
+ "AddressSanitizer:DEADLYSIGNAL"),
+ // MSan:
+ ::testing::HasSubstr("Writing to code.\n"
+ "MemorySanitizer:DEADLYSIGNAL"),
+ // UBSan:
+ ::testing::HasSubstr("Writing to code.\n"
+ "UndefinedBehaviorSanitizer:DEADLYSIGNAL")));
+#endif // GTEST_HAS_DEATH_TEST
+ } else {
+ base::Optional<CodeSpaceWriteScope> write_scope;
+ if (open_write_scope) write_scope.emplace(native_module());
+ // The signal handler does not write, or the code is not protected, so this
+ // should succeed.
+ pthread_kill(pthread_self(), SIGPROF);
+
+ CHECK_EQ(2, signal_handler_scope.num_handled_signals());
+ CHECK_EQ(write_in_signal_handler ? 0 : code_start, *code_start_ptr);
+ }
+}
+#endif // V8_OS_POSIX && !V8_OS_FUCHSIA
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 7854b9d5d7..efb2c62741 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -39,6 +39,8 @@ namespace module_decoder_unittest {
WASM_STRUCT_NEW_WITH_RTT(index, __VA_ARGS__), kExprEnd
#define WASM_INIT_EXPR_ARRAY_INIT(index, length, ...) \
WASM_ARRAY_INIT(index, length, __VA_ARGS__), kExprEnd
+#define WASM_INIT_EXPR_ARRAY_INIT_STATIC(index, length, ...) \
+ WASM_ARRAY_INIT_STATIC(index, length, __VA_ARGS__), kExprEnd
#define WASM_INIT_EXPR_RTT_CANON(index) WASM_RTT_CANON(index), kExprEnd
#define REF_NULL_ELEMENT kExprRefNull, kFuncRefCode, kExprEnd
@@ -123,37 +125,37 @@ namespace module_decoder_unittest {
kWasmArrayTypeCode, type, (mutability ? 1 : 0)
#define WASM_FUNCTION_DEF(...) kWasmFunctionTypeCode, __VA_ARGS__
-#define EXPECT_VERIFIES(data) \
- do { \
- ModuleResult result = DecodeModule(data, data + sizeof(data)); \
- EXPECT_OK(result); \
+#define EXPECT_VERIFIES(data) \
+ do { \
+ ModuleResult _result = DecodeModule(data, data + sizeof(data)); \
+ EXPECT_OK(_result); \
} while (false)
-#define EXPECT_FAILURE_LEN(data, length) \
- do { \
- ModuleResult result = DecodeModule(data, data + length); \
- EXPECT_FALSE(result.ok()); \
+#define EXPECT_FAILURE_LEN(data, length) \
+ do { \
+ ModuleResult _result = DecodeModule(data, data + length); \
+ EXPECT_FALSE(_result.ok()); \
} while (false)
#define EXPECT_FAILURE(data) EXPECT_FAILURE_LEN(data, sizeof(data))
-#define EXPECT_FAILURE_WITH_MSG(data, msg) \
- do { \
- ModuleResult result = DecodeModule(data, data + sizeof(data)); \
- EXPECT_FALSE(result.ok()); \
- if (!result.ok()) { \
- EXPECT_THAT(result.error().message(), HasSubstr(msg)); \
- } \
- } while (false)
-
-#define EXPECT_OFF_END_FAILURE(data, min) \
+#define EXPECT_FAILURE_WITH_MSG(data, msg) \
do { \
- STATIC_ASSERT(min < arraysize(data)); \
- for (size_t length = min; length < arraysize(data); length++) { \
- EXPECT_FAILURE_LEN(data, length); \
+ ModuleResult _result = DecodeModule(data, data + sizeof(data)); \
+ EXPECT_FALSE(_result.ok()); \
+ if (!_result.ok()) { \
+ EXPECT_THAT(_result.error().message(), HasSubstr(msg)); \
} \
} while (false)
+#define EXPECT_OFF_END_FAILURE(data, min) \
+ do { \
+ STATIC_ASSERT(min < arraysize(data)); \
+ for (size_t _length = min; _length < arraysize(data); _length++) { \
+ EXPECT_FAILURE_LEN(data, _length); \
+ } \
+ } while (false)
+
#define EXPECT_OK(result) \
do { \
if (!result.ok()) { \
@@ -1116,6 +1118,13 @@ TEST_F(WasmModuleVerifyTest, ArrayInitInitExpr) {
WASM_INIT_EXPR_ARRAY_INIT(0, 3, WASM_I32V(10), WASM_I32V(20),
WASM_I32V(30), WASM_RTT_CANON(0)))};
EXPECT_VERIFIES(basic);
+ static const byte basic_nominal[] = {
+ SECTION(Type, ENTRY_COUNT(1), WASM_ARRAY_DEF(kI16Code, true)),
+ SECTION(Global, ENTRY_COUNT(1), // --
+ kRefCode, 0, 0, // type, mutability
+ WASM_INIT_EXPR_ARRAY_INIT_STATIC(0, 3, WASM_I32V(10),
+ WASM_I32V(20), WASM_I32V(30)))};
+ EXPECT_VERIFIES(basic_nominal);
static const byte type_error[] = {
SECTION(Type, ENTRY_COUNT(2), // --
@@ -1147,7 +1156,7 @@ TEST_F(WasmModuleVerifyTest, ArrayInitInitExpr) {
WASM_I32V(30), WASM_RTT_CANON(0)))};
EXPECT_FAILURE_WITH_MSG(
length_error,
- "not enough arguments on the stack for array.init, expected 7 more");
+ "not enough arguments on the stack for array.init (need 11, got 4)");
}
TEST_F(WasmModuleVerifyTest, EmptyStruct) {
@@ -1234,6 +1243,135 @@ TEST_F(WasmModuleVerifyTest, InvalidStructTypeDef) {
EXPECT_FAILURE_WITH_MSG(invalid_mutability, "invalid mutability");
}
+TEST_F(WasmModuleVerifyTest, NominalStructTypeDef) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+
+ // Inheritance: t1 <: t2 <: t0
+ static const byte all_good[] = {
+ SECTION(Type, ENTRY_COUNT(3), // --
+ kWasmStructSubtypeCode, // type #0
+ 1, // field count
+ kI32Code, 1, // mut i32
+ kDataRefCode, // root of type hierarchy
+
+ kWasmStructSubtypeCode, // type #1
+ 2, // field count
+ kI32Code, 1, // mut i32 (inherited)
+ kI64Code, 1, // mut i64 (added)
+ 2, // supertype
+
+ kWasmStructSubtypeCode, // type #2
+ 1, // field count
+ kI32Code, 1, // mut i32 (inherited)
+ 0)}; // supertype
+ EXPECT_VERIFIES(all_good);
+ ModuleResult result = DecodeModule(all_good, all_good + sizeof(all_good));
+ EXPECT_OK(result);
+ WasmModule* module = result.value().get();
+ EXPECT_EQ(kGenericSuperType, module->supertype(0));
+ EXPECT_EQ(2u, module->supertype(1));
+ EXPECT_EQ(0u, module->supertype(2));
+
+ static const byte self_or_mutual_ref[] = {
+ SECTION(Type, ENTRY_COUNT(4), // --
+ kWasmStructSubtypeCode, 0, // empty struct
+ kDataRefCode, // root of hierarchy
+
+ kWasmStructSubtypeCode, // type1
+ 1, // field count
+ kOptRefCode, 1, 1, // mut optref type1
+ 0, // supertype
+
+ kWasmStructSubtypeCode, // type 2
+ 1, // field count
+ kOptRefCode, 3, 1, // mut optref type3
+ 0, // supertype
+
+ kWasmStructSubtypeCode, // type 3
+ 1, // field count
+ kOptRefCode, 2, 1, // mut optref type2
+ 0)}; // supertype
+ EXPECT_VERIFIES(self_or_mutual_ref);
+
+ static const byte mutual_ref_with_subtyping[] = {
+ SECTION(Type,
+ ENTRY_COUNT(3), // --
+ kWasmStructSubtypeCode, //
+ 1, // field count
+ kOptRefCode, 0, 0, // ref type0
+ kDataRefCode, // root of hierarchy
+
+ kWasmStructSubtypeCode, // --
+ 1, // field count
+ kOptRefCode, 2, 0, // ref type2
+ 0, // supertype
+
+ kWasmStructSubtypeCode, // --
+ 1, // field count
+ kOptRefCode, 1, 0, // ref type1
+ 0)}; // supertype
+ EXPECT_VERIFIES(mutual_ref_with_subtyping);
+
+ static const byte inheritance_cycle[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmStructSubtypeCode, 0, 1, // no fields, supertype 1
+ kWasmStructSubtypeCode, 0, 0)}; // no fields, supertype 0
+ EXPECT_FAILURE_WITH_MSG(inheritance_cycle, "cyclic inheritance");
+
+ static const byte invalid_field[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmStructTypeCode, U32V_1(1), kI32Code, 1, // t0: [i32]
+ kWasmStructSubtypeCode, U32V_1(2), // t1:
+ kI64Code, 1, // i64 (invalid inheritance)
+ kI32Code, 1, U32V_1(0))}; // i32 (added), supertype 0
+ EXPECT_FAILURE_WITH_MSG(invalid_field, "invalid explicit supertype");
+
+ static const byte structural_supertype[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmStructTypeCode, 0, // empty struct
+ kWasmStructSubtypeCode, 0, // also empty
+ 0)}; // supertype is structural type
+ EXPECT_FAILURE_WITH_MSG(structural_supertype, "invalid explicit supertype");
+
+ static const byte supertype_oob[] = {
+ SECTION(Type, ENTRY_COUNT(1), // --
+ kWasmStructSubtypeCode,
+ 0, // empty struct
+ 13)}; // supertype with invalid index
+ EXPECT_FAILURE_WITH_MSG(supertype_oob, "Type index 13 is out of bounds");
+}
+
+TEST_F(WasmModuleVerifyTest, NominalFunctionTypeDef) {
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(gc);
+ EXPERIMENTAL_FLAG_SCOPE(gc); // Needed for subtype checking.
+
+ static const byte all_good[] = {
+ SECTION(Type, ENTRY_COUNT(2), // --
+ kWasmFunctionSubtypeCode, // type #0
+ 1, // params count
+ kRefCode, 0, // ref #0
+ 1, // results count
+ kOptRefCode, 0, // optref #0
+ kFuncRefCode, // root of type hierarchy
+
+ kWasmFunctionSubtypeCode, // type #1
+ 1, // params count
+ kOptRefCode, 0, // refined (contravariant)
+ 1, // results count
+ kRefCode, 0, // refined (covariant)
+ 0)}; // supertype
+ EXPECT_VERIFIES(all_good);
+ ModuleResult result = DecodeModule(all_good, all_good + sizeof(all_good));
+ EXPECT_OK(result);
+ WasmModule* module = result.value().get();
+ EXPECT_EQ(kGenericSuperType, module->supertype(0));
+ EXPECT_EQ(0u, module->supertype(1));
+}
+
TEST_F(WasmModuleVerifyTest, InvalidArrayTypeDef) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
diff --git a/deps/v8/test/unittests/wasm/subtyping-unittest.cc b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
index c7a5470ca1..19ebd8e998 100644
--- a/deps/v8/test/unittests/wasm/subtyping-unittest.cc
+++ b/deps/v8/test/unittests/wasm/subtyping-unittest.cc
@@ -26,19 +26,21 @@ void DefineStruct(WasmModule* module, std::initializer_list<FieldInit> fields) {
for (FieldInit field : fields) {
builder.AddField(field.first, field.second);
}
- return module->add_struct_type(builder.Build());
+ return module->add_struct_type(builder.Build(), kNoSuperType);
}
void DefineArray(WasmModule* module, FieldInit element_type) {
module->add_array_type(module->signature_zone->New<ArrayType>(
- element_type.first, element_type.second));
+ element_type.first, element_type.second),
+ kNoSuperType);
}
void DefineSignature(WasmModule* module,
std::initializer_list<ValueType> params,
std::initializer_list<ValueType> returns) {
module->add_signature(
- FunctionSig::Build(module->signature_zone.get(), returns, params));
+ FunctionSig::Build(module->signature_zone.get(), returns, params),
+ kNoSuperType);
}
TEST_F(WasmSubtypingTest, Subtyping) {
diff --git a/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
index dffa202b03..01d446879b 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
@@ -4,6 +4,8 @@
#include "src/trap-handler/trap-handler-simulator.h"
+#include <cstdint>
+
#include "include/v8-initialization.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
@@ -23,6 +25,14 @@ constexpr uintptr_t kFakePc = 11;
class SimulatorTrapHandlerTest : public TestWithIsolate {
public:
+ ~SimulatorTrapHandlerTest() {
+ if (inaccessible_memory_) {
+ auto* page_allocator = GetPlatformPageAllocator();
+ CHECK(page_allocator->FreePages(inaccessible_memory_,
+ page_allocator->AllocatePageSize()));
+ }
+ }
+
void SetThreadInWasm() {
EXPECT_EQ(0, *thread_in_wasm);
*thread_in_wasm = 1;
@@ -33,7 +43,23 @@ class SimulatorTrapHandlerTest : public TestWithIsolate {
*thread_in_wasm = 0;
}
+ uintptr_t InaccessibleMemoryPtr() {
+ if (!inaccessible_memory_) {
+ auto* page_allocator = GetPlatformPageAllocator();
+ size_t page_size = page_allocator->AllocatePageSize();
+ inaccessible_memory_ =
+ reinterpret_cast<uint8_t*>(page_allocator->AllocatePages(
+ nullptr, /* size */ page_size, /* align */ page_size,
+ PageAllocator::kNoAccess));
+ CHECK_NOT_NULL(inaccessible_memory_);
+ }
+ return reinterpret_cast<uintptr_t>(inaccessible_memory_);
+ }
+
int* thread_in_wasm = trap_handler::GetThreadInWasmThreadLocalAddress();
+
+ private:
+ uint8_t* inaccessible_memory_ = nullptr;
};
TEST_F(SimulatorTrapHandlerTest, ProbeMemorySuccess) {
@@ -41,24 +67,26 @@ TEST_F(SimulatorTrapHandlerTest, ProbeMemorySuccess) {
EXPECT_EQ(0u, ProbeMemory(reinterpret_cast<uintptr_t>(&x), kFakePc));
}
-TEST_F(SimulatorTrapHandlerTest, ProbeMemoryFail) {
+TEST_F(SimulatorTrapHandlerTest, ProbeMemoryFailNullptr) {
constexpr uintptr_t kNullAddress = 0;
EXPECT_DEATH_IF_SUPPORTED(ProbeMemory(kNullAddress, kFakePc), "");
}
+TEST_F(SimulatorTrapHandlerTest, ProbeMemoryFailInaccessible) {
+ EXPECT_DEATH_IF_SUPPORTED(ProbeMemory(InaccessibleMemoryPtr(), kFakePc), "");
+}
+
TEST_F(SimulatorTrapHandlerTest, ProbeMemoryFailWhileInWasm) {
// Test that we still crash if the trap handler is set up and the "thread in
// wasm" flag is set, but the PC is not registered as a protected instruction.
constexpr bool kUseDefaultHandler = true;
CHECK(v8::V8::EnableWebAssemblyTrapHandler(kUseDefaultHandler));
- constexpr uintptr_t kNullAddress = 0;
SetThreadInWasm();
- EXPECT_DEATH_IF_SUPPORTED(ProbeMemory(kNullAddress, kFakePc), "");
+ EXPECT_DEATH_IF_SUPPORTED(ProbeMemory(InaccessibleMemoryPtr(), kFakePc), "");
}
TEST_F(SimulatorTrapHandlerTest, ProbeMemoryWithTrapHandled) {
- constexpr uintptr_t kNullAddress = 0;
constexpr uintptr_t kFakeLandingPad = 19;
constexpr bool kUseDefaultHandler = true;
@@ -69,7 +97,7 @@ TEST_F(SimulatorTrapHandlerTest, ProbeMemoryWithTrapHandled) {
RegisterHandlerData(0, 128, 1, &fake_protected_instruction);
SetThreadInWasm();
- EXPECT_EQ(kFakeLandingPad, ProbeMemory(kNullAddress, kFakePc));
+ EXPECT_EQ(kFakeLandingPad, ProbeMemory(InaccessibleMemoryPtr(), kFakePc));
// Reset everything.
ResetThreadInWasm();
@@ -87,9 +115,9 @@ TEST_F(SimulatorTrapHandlerTest, ProbeMemoryWithLandingPad) {
MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
buffer->CreateView());
// Generate an illegal memory access.
- masm.Mov(scratch, 0);
+ masm.Mov(scratch, InaccessibleMemoryPtr());
uint32_t crash_offset = masm.pc_offset();
- masm.Str(scratch, MemOperand(scratch, 0)); // nullptr access
+ masm.Str(scratch, MemOperand(scratch, 0)); // store to inaccessible memory.
uint32_t recovery_offset = masm.pc_offset();
// Return.
masm.Ret();
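
The new InaccessibleMemoryPtr() helper above replaces the hard-coded null address with a page that is mapped but deliberately unreadable, so the death tests fault on a realistic out-of-bounds address rather than on nullptr. Outside V8's PageAllocator, the same effect can be sketched with plain POSIX calls (a platform-specific illustration, not the code the test uses):

#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Reserve one page with no access rights; any load or store through the
// returned address traps with SIGSEGV, like the test's inaccessible page.
uintptr_t ReserveInaccessiblePage() {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  void* page = mmap(nullptr, page_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(page != MAP_FAILED);
  return reinterpret_cast<uintptr_t>(page);
}

// Release the reservation, as the test's destructor does via FreePages.
void ReleaseInaccessiblePage(uintptr_t address) {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  munmap(reinterpret_cast<void*>(address), page_size);
}
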
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index a83eb6bc48..3a308d46d8 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -80,7 +80,7 @@ TEST_F(WasmMacroGenTest, Statements) {
EXPECT_SIZE(7, WASM_LOOP(WASM_BR_IF(0, WASM_ZERO)));
EXPECT_SIZE(1, WASM_RETURN0);
- EXPECT_SIZE(3, WASM_RETURN1(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_RETURN(WASM_ZERO));
EXPECT_SIZE(1, WASM_UNREACHABLE);
}
diff --git a/deps/v8/test/wasm-api-tests/callbacks.cc b/deps/v8/test/wasm-api-tests/callbacks.cc
index 8df770732a..2d6fdcee3a 100644
--- a/deps/v8/test/wasm-api-tests/callbacks.cc
+++ b/deps/v8/test/wasm-api-tests/callbacks.cc
@@ -137,11 +137,11 @@ TEST_F(WasmCapiTest, Recursion) {
builder()->AddImport(base::CStrVector("fibonacci_c"), wasm_i_i_sig());
byte code_fibo[] = {
WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_ZERO),
- WASM_RETURN1(WASM_ZERO)),
- WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_ONE), WASM_RETURN1(WASM_ONE)),
+ WASM_RETURN(WASM_ZERO)),
+ WASM_IF(WASM_I32_EQ(WASM_LOCAL_GET(0), WASM_ONE), WASM_RETURN(WASM_ONE)),
// Muck with the parameter to ensure callers don't depend on its value.
WASM_LOCAL_SET(0, WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_ONE)),
- WASM_RETURN1(WASM_I32_ADD(
+ WASM_RETURN(WASM_I32_ADD(
WASM_CALL_FUNCTION(fibo_c_index, WASM_LOCAL_GET(0)),
WASM_CALL_FUNCTION(fibo_c_index,
WASM_I32_SUB(WASM_LOCAL_GET(0), WASM_ONE))))};
diff --git a/deps/v8/test/wasm-api-tests/finalize.cc b/deps/v8/test/wasm-api-tests/finalize.cc
index 08e894d81f..19646cf007 100644
--- a/deps/v8/test/wasm-api-tests/finalize.cc
+++ b/deps/v8/test/wasm-api-tests/finalize.cc
@@ -65,7 +65,7 @@ void RunInStore(Store* store, ZoneBuffer* wire_bytes, int iterations) {
TEST_F(WasmCapiTest, InstanceFinalization) {
// Add a dummy function: f(x) { return x; }
- byte code[] = {WASM_RETURN1(WASM_LOCAL_GET(0))};
+ byte code[] = {WASM_RETURN(WASM_LOCAL_GET(0))};
AddExportedFunction(base::CStrVector("f"), code, sizeof(code),
wasm_i_i_sig());
Compile();
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 6a99554898..785853a48b 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -92,6 +92,10 @@ class TestCase(testcase.D8TestCase):
script = os.path.join(self.suite.test_root,
os.sep.join(['proposals', proposal['name']]),
script[len(WPT_ROOT):])
+ if 'wpt' in current_dir:
+ found = True
+ script = os.path.join(self.suite.test_root, 'wpt',
+ script[len(WPT_ROOT):])
if not found:
script = os.path.join(self.suite.test_root, script[len(WPT_ROOT):])
elif not script.startswith("/"):
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index e0eefb98fe..8cd46e7cec 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-47fd6174b31ba072c87780a01537140f817989dc \ No newline at end of file
+c12d3f56c36361cfd03dcf9f80519a35ba5d20f2 \ No newline at end of file
diff --git a/deps/v8/test/wasm-js/wasm-js.status b/deps/v8/test/wasm-js/wasm-js.status
index 39d9b86b95..3c1431eebd 100644
--- a/deps/v8/test/wasm-js/wasm-js.status
+++ b/deps/v8/test/wasm-js/wasm-js.status
@@ -5,16 +5,31 @@
[
[ALWAYS, {
+ # This test can only be executed in the browser
+ 'wpt/idlharness': [SKIP],
+ # Failing WPT tests
+ 'wpt/exception/toString.tentative': [FAIL],
+ 'wpt/exception/type.tentative': [FAIL],
+ 'wpt/function/constructor.tentative': [FAIL],
+ 'wpt/function/table.tentative': [FAIL],
+ 'wpt/function/type.tentative': [FAIL],
+
+ # Outdated proposal tests.
+ 'proposals/js-types/table/get-set': [FAIL],
+ 'proposals/memory64/table/get-set': [FAIL],
+ 'proposals/simd/table/get-set': [FAIL],
+ 'proposals/tail-call/table/get-set': [FAIL],
+ 'proposals/js-types/memory/constructor': [FAIL],
+ 'proposals/memory64/memory/constructor': [FAIL],
+ 'proposals/simd/memory/constructor': [FAIL],
+ 'proposals/tail-call/memory/constructor': [FAIL],
+
# TODO(v8:10556): Remove sub-typing in the reference-types implementation
- 'constructor/instantiate': [FAIL],
- 'instance/constructor': [FAIL],
'proposals/js-types/constructor/instantiate': [FAIL],
'proposals/js-types/global/constructor': [FAIL],
'proposals/js-types/global/value-get-set': [FAIL],
'proposals/js-types/instance/constructor': [FAIL],
- 'prototypes': [FAIL],
-
# These are slow, and not useful to run for the proposals:
'proposals/js-types/limits': [SKIP],
'proposals/simd/limits': [SKIP],
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index c3e57ce6c5..caa4a20b6f 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -18,11 +18,6 @@ proposal_flags = [{
'--wasm-staging']
},
{
- 'name': 'simd',
- 'flags': ['--experimental-wasm-simd',
- '--wasm-staging']
- },
- {
'name': 'memory64',
'flags': ['--experimental-wasm-memory64',
'--wasm-staging']
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 1caa818336..5ff327e952 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-51142d7857b770528a57965d94c7b5365bf9c1a7 \ No newline at end of file
+fba50a99186f4c65c0482cf51ddbda9106a675bd \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 935f8a77d2..676c4a9627 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -25,7 +25,6 @@
'proposals/tail-call/func': [FAIL],
'proposals/tail-call/globals': [FAIL],
'proposals/tail-call/linking': [FAIL],
- 'proposals/tail-call/type': [FAIL],
# TODO(v8:11401): Fix memory64 spec tests / the v8 implementation (whatever
# is broken).
diff --git a/deps/v8/testing/gtest-support.h b/deps/v8/testing/gtest-support.h
index ba0e2f41f9..21c8ebf4d3 100644
--- a/deps/v8/testing/gtest-support.h
+++ b/deps/v8/testing/gtest-support.h
@@ -36,21 +36,20 @@ GET_TYPE_NAME(double)
// |var| while inside the loop body.
#define TRACED_FOREACH(_type, _var, _container) \
for (_type const _var : _container) \
- for (bool _done = false; !_done;) \
+ for (bool _var##_done = false; !_var##_done;) \
for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
- !_done; _done = true)
-
+ !_var##_done; _var##_done = true)
// TRACED_FORRANGE(type, var, low, high) expands to a loop that assigns |var|
// every value in the range |low| to (including) |high| and adds a
// SCOPED_TRACE() message for the |var| while inside the loop body.
// TODO(bmeurer): Migrate to C++11 once we're ready.
#define TRACED_FORRANGE(_type, _var, _low, _high) \
- for (_type _i = _low; _i <= _high; ++_i) \
- for (bool _done = false; !_done;) \
- for (_type const _var = _i; !_done;) \
+ for (_type _var##_i = _low; _var##_i <= _high; ++_var##_i) \
+ for (bool _var##_done = false; !_var##_done;) \
+ for (_type const _var = _var##_i; !_var##_done;) \
for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
- !_done; _done = true)
+ !_var##_done; _var##_done = true)
} // namespace internal
} // namespace testing
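
The _var## prefix makes TRACED_FOREACH and TRACED_FORRANGE hygienic: each expansion derives its control variables from the user-supplied name, so nested uses no longer all declare a variable literally called _done (or _i) that shadows the one from the enclosing expansion. A stripped-down illustration of the token-pasting trick, without the SCOPED_TRACE plumbing:

#include <iostream>
#include <vector>

// Each expansion gets its own <var>_done flag, so nesting is safe.
#define TRACED_LOOP(_type, _var, _container)        \
  for (_type const _var : _container)               \
    for (bool _var##_done = false; !_var##_done;    \
         _var##_done = true)

int main() {
  std::vector<int> xs = {1, 2};
  std::vector<int> ys = {10, 20};
  TRACED_LOOP(int, x, xs)
    TRACED_LOOP(int, y, ys)  // expands to y_done, distinct from x_done
      std::cout << x + y << '\n';
  return 0;
}
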
diff --git a/deps/v8/third_party/jinja2/tests.py b/deps/v8/third_party/jinja2/tests.py
index b14f85ff14..0adc3d4dbc 100644
--- a/deps/v8/third_party/jinja2/tests.py
+++ b/deps/v8/third_party/jinja2/tests.py
@@ -10,7 +10,7 @@
"""
import operator
import re
-from collections.abc import Mapping
+from collections import Mapping
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, integer_types
import decimal
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index 334bc44922..e63c224065 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -20,7 +20,7 @@ class SortState extends HeapObject {
return sortCompare(context, this.userCmpFn, x, y);
}
- macro CheckAccessor(implicit context: Context)() labels Bailout {
+ macro CheckAccessor(implicit context: Context)(): void labels Bailout {
if (!IsFastJSArray(this.receiver, context)) goto Bailout;
const canUseSameAccessorFn: CanUseSameAccessorFn =
@@ -33,7 +33,7 @@ class SortState extends HeapObject {
}
}
- macro ResetToGenericAccessor() {
+ macro ResetToGenericAccessor(): void {
this.loadFn = Load<GenericElementsAccessor>;
this.storeFn = Store<GenericElementsAccessor>;
this.deleteFn = Delete<GenericElementsAccessor>;
@@ -307,7 +307,7 @@ transitioning builtin Delete<ElementsAccessor : type extends ElementsKind>(
Delete<FastSmiElements>(
context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+ dcheck(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
@@ -317,7 +317,7 @@ Delete<FastSmiElements>(
Delete<FastObjectElements>(
context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+ dcheck(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
@@ -327,7 +327,7 @@ Delete<FastObjectElements>(
Delete<FastDoubleElements>(
context: Context, sortState: SortState, index: Smi): Smi {
- assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+ dcheck(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedDoubleArray>(object.elements);
@@ -337,7 +337,7 @@ Delete<FastDoubleElements>(
transitioning builtin SortCompareDefault(
context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
- assert(comparefn == Undefined);
+ dcheck(comparefn == Undefined);
if (TaggedIsSmi(x) && TaggedIsSmi(y)) {
return SmiLexicographicCompare(UnsafeCast<Smi>(x), UnsafeCast<Smi>(y));
@@ -365,7 +365,7 @@ transitioning builtin SortCompareDefault(
transitioning builtin SortCompareUserFn(
context: Context, comparefn: JSAny, x: JSAny, y: JSAny): Number {
- assert(comparefn != Undefined);
+ dcheck(comparefn != Undefined);
const cmpfn = UnsafeCast<Callable>(comparefn);
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
@@ -383,7 +383,7 @@ builtin CanUseSameAccessor<ElementsAccessor : type extends ElementsKind>(
initialReceiverLength: Number): Boolean {
if (receiver.map != initialReceiverMap) return False;
- assert(TaggedIsSmi(initialReceiverLength));
+ dcheck(TaggedIsSmi(initialReceiverLength));
const array = UnsafeCast<JSArray>(receiver);
const originalLength = UnsafeCast<Smi>(initialReceiverLength);
@@ -401,7 +401,7 @@ CanUseSameAccessor<GenericElementsAccessor>(
// for easier invariant checks at all use sites.
macro GetPendingRunsSize(implicit context: Context)(sortState: SortState): Smi {
const stackSize: Smi = sortState.pendingRunsSize;
- assert(stackSize >= 0);
+ dcheck(stackSize >= 0);
return stackSize;
}
@@ -410,7 +410,7 @@ macro GetPendingRunBase(implicit context: Context)(
return UnsafeCast<Smi>(pendingRuns.objects[run << 1]);
}
-macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
+macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi): void {
pendingRuns.objects[run << 1] = value;
}
@@ -419,13 +419,13 @@ macro GetPendingRunLength(implicit context: Context)(
return UnsafeCast<Smi>(pendingRuns.objects[(run << 1) + 1]);
}
-macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
+macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi): void {
pendingRuns.objects[(run << 1) + 1] = value;
}
macro PushRun(implicit context: Context)(
- sortState: SortState, base: Smi, length: Smi) {
- assert(GetPendingRunsSize(sortState) < kMaxMergePending);
+ sortState: SortState, base: Smi, length: Smi): void {
+ dcheck(GetPendingRunsSize(sortState) < kMaxMergePending);
const stackSize: Smi = GetPendingRunsSize(sortState);
const pendingRuns: FixedArray = sortState.pendingRuns;
@@ -458,10 +458,10 @@ transitioning builtin
Copy(implicit context: Context)(
source: FixedArray, srcPos: Smi, target: FixedArray, dstPos: Smi,
length: Smi): JSAny {
- assert(srcPos >= 0);
- assert(dstPos >= 0);
- assert(srcPos <= source.length - length);
- assert(dstPos <= target.length - length);
+ dcheck(srcPos >= 0);
+ dcheck(dstPos >= 0);
+ dcheck(srcPos <= source.length - length);
+ dcheck(dstPos <= target.length - length);
// TODO(szuend): Investigate whether this builtin should be replaced
// by CopyElements/MoveElements for performance.
@@ -498,8 +498,8 @@ Copy(implicit context: Context)(
// On entry, must have low <= start <= high, and that [low, start) is
// already sorted. Pass start == low if you do not know!.
macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
- low: Smi, startArg: Smi, high: Smi) {
- assert(low <= startArg && startArg <= high);
+ low: Smi, startArg: Smi, high: Smi): void {
+ dcheck(low <= startArg && startArg <= high);
const workArray = sortState.workArray;
@@ -515,7 +515,7 @@ macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
// Invariants:
// pivot >= all in [low, left).
// pivot < all in [right, start).
- assert(left < right);
+ dcheck(left < right);
// Find pivot insertion point.
while (left < right) {
@@ -529,7 +529,7 @@ macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
left = mid + 1;
}
}
- assert(left == right);
+ dcheck(left == right);
// The invariants still hold, so:
// pivot >= all in [low, left) and
@@ -564,7 +564,7 @@ macro BinaryInsertionSort(implicit context: Context, sortState: SortState)(
// returned length is always an ascending sequence.
macro CountAndMakeRun(implicit context: Context, sortState: SortState)(
lowArg: Smi, high: Smi): Smi {
- assert(lowArg < high);
+ dcheck(lowArg < high);
const workArray = sortState.workArray;
@@ -604,7 +604,7 @@ macro CountAndMakeRun(implicit context: Context, sortState: SortState)(
return runLength;
}
-macro ReverseRange(array: FixedArray, from: Smi, to: Smi) {
+macro ReverseRange(array: FixedArray, from: Smi, to: Smi): void {
let low: Smi = from;
let high: Smi = to - 1;
@@ -624,9 +624,9 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
// We are only allowed to either merge the two top-most runs, or leave
// the top most run alone and merge the two next runs.
- assert(stackSize >= 2);
- assert(i >= 0);
- assert(i == stackSize - 2 || i == stackSize - 3);
+ dcheck(stackSize >= 2);
+ dcheck(i >= 0);
+ dcheck(i == stackSize - 2 || i == stackSize - 3);
const workArray = sortState.workArray;
@@ -635,8 +635,8 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
let lengthA: Smi = GetPendingRunLength(pendingRuns, i);
const baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
let lengthB: Smi = GetPendingRunLength(pendingRuns, i + 1);
- assert(lengthA > 0 && lengthB > 0);
- assert(baseA + lengthA == baseB);
+ dcheck(lengthA > 0 && lengthB > 0);
+ dcheck(baseA + lengthA == baseB);
// Record the length of the combined runs; if i is the 3rd-last run now,
// also slide over the last run (which isn't involved in this merge).
@@ -654,18 +654,18 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
// because they are already in place.
const keyRight = UnsafeCast<JSAny>(workArray.objects[baseB]);
const k: Smi = GallopRight(workArray, keyRight, baseA, lengthA, 0);
- assert(k >= 0);
+ dcheck(k >= 0);
baseA = baseA + k;
lengthA = lengthA - k;
if (lengthA == 0) return kSuccess;
- assert(lengthA > 0);
+ dcheck(lengthA > 0);
// Where does a end in b? Elements in b after that can be ignored,
// because they are already in place.
const keyLeft = UnsafeCast<JSAny>(workArray.objects[baseA + lengthA - 1]);
lengthB = GallopLeft(workArray, keyLeft, baseB, lengthB, lengthB - 1);
- assert(lengthB >= 0);
+ dcheck(lengthB >= 0);
if (lengthB == 0) return kSuccess;
// Merge what remains of the runs, using a temp array with
@@ -698,8 +698,8 @@ MergeAt(implicit context: Context, sortState: SortState)(i: Smi): Smi {
// is plus infinity. In other words, key belongs at index base + k.
builtin GallopLeft(implicit context: Context, sortState: SortState)(
array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
- assert(length > 0 && base >= 0);
- assert(0 <= hint && hint < length);
+ dcheck(length > 0 && base >= 0);
+ dcheck(0 <= hint && hint < length);
let lastOfs: Smi = 0;
let offset: Smi = 1;
@@ -736,7 +736,7 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
} else {
// key <= a[base + hint]: gallop left, until
// a[base + hint - offset] < key <= a[base + hint - lastOfs].
- assert(order >= 0);
+ dcheck(order >= 0);
// a[base + hint] is lowest.
const maxOfs: Smi = hint + 1;
@@ -762,7 +762,7 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
offset = hint - tmp;
}
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+ dcheck(-1 <= lastOfs && lastOfs < offset && offset <= length);
// Now a[base+lastOfs] < key <= a[base+offset], so key belongs
// somewhere to the right of lastOfs but no farther right than offset.
@@ -781,8 +781,8 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
}
}
// so a[base + offset - 1] < key <= a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
+ dcheck(lastOfs == offset);
+ dcheck(0 <= offset && offset <= length);
return offset;
}
@@ -797,8 +797,8 @@ builtin GallopLeft(implicit context: Context, sortState: SortState)(
// or kFailure on error.
builtin GallopRight(implicit context: Context, sortState: SortState)(
array: FixedArray, key: JSAny, base: Smi, length: Smi, hint: Smi): Smi {
- assert(length > 0 && base >= 0);
- assert(0 <= hint && hint < length);
+ dcheck(length > 0 && base >= 0);
+ dcheck(0 <= hint && hint < length);
let lastOfs: Smi = 0;
let offset: Smi = 1;
@@ -859,7 +859,7 @@ builtin GallopRight(implicit context: Context, sortState: SortState)(
lastOfs = lastOfs + hint;
offset = offset + hint;
}
- assert(-1 <= lastOfs && lastOfs < offset && offset <= length);
+ dcheck(-1 <= lastOfs && lastOfs < offset && offset <= length);
// Now a[base + lastOfs] <= key < a[base + ofs], so key belongs
// somewhere to the right of lastOfs but no farther right than ofs.
@@ -878,8 +878,8 @@ builtin GallopRight(implicit context: Context, sortState: SortState)(
}
}
// so a[base + offset - 1] <= key < a[base + offset].
- assert(lastOfs == offset);
- assert(0 <= offset && offset <= length);
+ dcheck(lastOfs == offset);
+ dcheck(0 <= offset && offset <= length);
return offset;
}
@@ -890,10 +890,10 @@ builtin GallopRight(implicit context: Context, sortState: SortState)(
// that array[baseA + lengthA - 1] belongs at the end of the merge,
// and should have lengthA <= lengthB.
transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
- baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
- assert(0 < lengthAArg && 0 < lengthBArg);
- assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthAArg == baseB);
+ baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi): void {
+ dcheck(0 < lengthAArg && 0 < lengthBArg);
+ dcheck(0 <= baseA && 0 < baseB);
+ dcheck(baseA + lengthAArg == baseB);
let lengthA: Smi = lengthAArg;
let lengthB: Smi = lengthBArg;
@@ -924,7 +924,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
// TODO(szuend): Replace with something that does not have a runtime
// overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- assert(lengthA > 1 && lengthB > 0);
+ dcheck(lengthA > 1 && lengthB > 0);
const order = sortState.Compare(
UnsafeCast<JSAny>(workArray.objects[cursorB]),
@@ -959,7 +959,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
while (nofWinsA >= kMinGallopWins || nofWinsB >= kMinGallopWins ||
firstIteration) {
firstIteration = false;
- assert(lengthA > 1 && lengthB > 0);
+ dcheck(lengthA > 1 && lengthB > 0);
minGallop = SmiMax(1, minGallop - 1);
sortState.minGallop = minGallop;
@@ -967,7 +967,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
nofWinsA = GallopRight(
tempArray, UnsafeCast<JSAny>(workArray.objects[cursorB]),
cursorTemp, lengthA, 0);
- assert(nofWinsA >= 0);
+ dcheck(nofWinsA >= 0);
if (nofWinsA > 0) {
Copy(tempArray, cursorTemp, workArray, dest, nofWinsA);
@@ -987,7 +987,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
nofWinsB = GallopLeft(
workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
cursorB, lengthB, 0);
- assert(nofWinsB >= 0);
+ dcheck(nofWinsB >= 0);
if (nofWinsB > 0) {
Copy(workArray, cursorB, workArray, dest, nofWinsB);
@@ -1008,7 +1008,7 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
Copy(tempArray, cursorTemp, workArray, dest, lengthA);
}
} label CopyB {
- assert(lengthA == 1 && lengthB > 0);
+ dcheck(lengthA == 1 && lengthB > 0);
// The last element of run A belongs at the end of the merge.
Copy(workArray, cursorB, workArray, dest, lengthB);
workArray.objects[dest + lengthB] = tempArray.objects[cursorTemp];
@@ -1020,10 +1020,10 @@ transitioning macro MergeLow(implicit context: Context, sortState: SortState)(
// be > 0. Must also have that array[baseA + lengthA - 1] belongs at the
// end of the merge and should have lengthA >= lengthB.
transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
- baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi) {
- assert(0 < lengthAArg && 0 < lengthBArg);
- assert(0 <= baseA && 0 < baseB);
- assert(baseA + lengthAArg == baseB);
+ baseA: Smi, lengthAArg: Smi, baseB: Smi, lengthBArg: Smi): void {
+ dcheck(0 < lengthAArg && 0 < lengthBArg);
+ dcheck(0 <= baseA && 0 < baseB);
+ dcheck(baseA + lengthAArg == baseB);
let lengthA: Smi = lengthAArg;
let lengthB: Smi = lengthBArg;
@@ -1055,7 +1055,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
// TODO(szuend): Replace with something that does not have a runtime
// overhead as soon as it's available in Torque.
while (Int32TrueConstant()) {
- assert(lengthA > 0 && lengthB > 1);
+ dcheck(lengthA > 0 && lengthB > 1);
const order = sortState.Compare(
UnsafeCast<JSAny>(tempArray.objects[cursorTemp]),
@@ -1091,7 +1091,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
firstIteration) {
firstIteration = false;
- assert(lengthA > 0 && lengthB > 1);
+ dcheck(lengthA > 0 && lengthB > 1);
minGallop = SmiMax(1, minGallop - 1);
sortState.minGallop = minGallop;
@@ -1099,7 +1099,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
let k: Smi = GallopRight(
workArray, UnsafeCast<JSAny>(tempArray.objects[cursorTemp]), baseA,
lengthA, lengthA - 1);
- assert(k >= 0);
+ dcheck(k >= 0);
nofWinsA = lengthA - k;
if (nofWinsA > 0) {
@@ -1116,7 +1116,7 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
k = GallopLeft(
tempArray, UnsafeCast<JSAny>(workArray.objects[cursorA]), 0,
lengthB, lengthB - 1);
- assert(k >= 0);
+ dcheck(k >= 0);
nofWinsB = lengthB - k;
if (nofWinsB > 0) {
@@ -1139,11 +1139,11 @@ transitioning macro MergeHigh(implicit context: Context, sortState: SortState)(
}
} label Succeed {
if (lengthB > 0) {
- assert(lengthA == 0);
+ dcheck(lengthA == 0);
Copy(tempArray, 0, workArray, dest - (lengthB - 1), lengthB);
}
} label CopyA {
- assert(lengthB == 1 && lengthA > 0);
+ dcheck(lengthB == 1 && lengthA > 0);
// The first element of run B belongs at the front of the merge.
dest = dest - lengthA;
@@ -1166,14 +1166,14 @@ macro ComputeMinRunLength(nArg: Smi): Smi {
let n: Smi = nArg;
let r: Smi = 0; // Becomes 1 if any 1 bits are shifted off.
- assert(n >= 0);
+ dcheck(n >= 0);
while (n >= 64) {
r = r | (n & 1);
n = n >> 1;
}
const minRunLength: Smi = n + r;
- assert(nArg < 64 || (32 <= minRunLength && minRunLength <= 64));
+ dcheck(nArg < 64 || (32 <= minRunLength && minRunLength <= 64));
return minRunLength;
}
@@ -1198,7 +1198,8 @@ macro RunInvariantEstablished(implicit context: Context)(
// TODO(szuend): Remove unnecessary loads. This macro was refactored to
// improve readability, introducing unnecessary loads in the
// process. Determine if all these extra loads are ok.
-transitioning macro MergeCollapse(context: Context, sortState: SortState) {
+transitioning macro MergeCollapse(
+ context: Context, sortState: SortState): void {
const pendingRuns: FixedArray = sortState.pendingRuns;
// Reload the stack size because MergeAt might change it.
@@ -1226,7 +1227,7 @@ transitioning macro MergeCollapse(context: Context, sortState: SortState) {
// Regardless of invariants, merge all runs on the stack until only one
// remains. This is used at the end of the mergesort.
transitioning macro
-MergeForceCollapse(context: Context, sortState: SortState) {
+MergeForceCollapse(context: Context, sortState: SortState): void {
const pendingRuns: FixedArray = sortState.pendingRuns;
// Reload the stack size because MergeAt might change it.
@@ -1243,7 +1244,7 @@ MergeForceCollapse(context: Context, sortState: SortState) {
}
transitioning macro
-ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi) {
+ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi): void {
if (length < 2) return;
let remaining: Smi = length;
@@ -1272,8 +1273,8 @@ ArrayTimSortImpl(context: Context, sortState: SortState, length: Smi) {
}
MergeForceCollapse(context, sortState);
- assert(GetPendingRunsSize(sortState) == 1);
- assert(GetPendingRunLength(sortState.pendingRuns, 0) == length);
+ dcheck(GetPendingRunsSize(sortState) == 1);
+ dcheck(GetPendingRunLength(sortState.pendingRuns, 0) == length);
}
transitioning macro
@@ -1290,7 +1291,7 @@ CompactReceiverElementsIntoWorkArray(
// TODO(szuend): Implement full range sorting, not only up to MaxSmi.
// https://crbug.com/v8/7970.
const receiverLength: Number = sortState.initialReceiverLength;
- assert(IsNumberNormalized(receiverLength));
+ dcheck(IsNumberNormalized(receiverLength));
const sortLength: Smi = TaggedIsSmi(receiverLength) ?
UnsafeCast<Smi>(receiverLength) :
@@ -1322,12 +1323,12 @@ CompactReceiverElementsIntoWorkArray(
transitioning macro
CopyWorkArrayToReceiver(implicit context: Context, sortState: SortState)(
- numberOfNonUndefined: Smi) {
+ numberOfNonUndefined: Smi): void {
const storeFn = sortState.storeFn;
const workArray = sortState.workArray;
- assert(numberOfNonUndefined <= workArray.length);
- assert(
+ dcheck(numberOfNonUndefined <= workArray.length);
+ dcheck(
numberOfNonUndefined + sortState.numberOfUndefined <=
sortState.sortLength);
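
Two mechanical changes run through the whole Torque sort implementation: macros that return nothing now spell out an explicit ": void" return type, and assert(...) becomes dcheck(...), making it clearer that these checks are debug-only and disappear from release builds. The C++ analogue of that always-on versus debug-only split, as a minimal sketch (the macro names below are generic stand-ins, not V8's actual CHECK/DCHECK definitions):

#include <cstdio>
#include <cstdlib>

// CHECK-style: evaluated in every build mode, aborts on failure.
#define MY_CHECK(cond)                                   \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (0)

// DCHECK-style: compiled out when NDEBUG is defined; release builds pay nothing.
#ifdef NDEBUG
#define MY_DCHECK(cond) ((void)0)
#else
#define MY_DCHECK(cond) MY_CHECK(cond)
#endif

int Divide(int a, int b) {
  MY_DCHECK(b != 0);  // documents and verifies the invariant in debug builds only
  return a / b;
}
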
diff --git a/deps/v8/third_party/zlib/google/zip_reader.cc b/deps/v8/third_party/zlib/google/zip_reader.cc
index 2ad1398499..0c6a93aa51 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader.cc
@@ -36,6 +36,10 @@ namespace {
class StringWriterDelegate : public WriterDelegate {
public:
StringWriterDelegate(size_t max_read_bytes, std::string* output);
+
+ StringWriterDelegate(const StringWriterDelegate&) = delete;
+ StringWriterDelegate& operator=(const StringWriterDelegate&) = delete;
+
~StringWriterDelegate() override;
// WriterDelegate methods:
@@ -52,8 +56,6 @@ class StringWriterDelegate : public WriterDelegate {
private:
size_t max_read_bytes_;
std::string* output_;
-
- DISALLOW_COPY_AND_ASSIGN(StringWriterDelegate);
};
StringWriterDelegate::StringWriterDelegate(size_t max_read_bytes,
diff --git a/deps/v8/third_party/zlib/google/zip_reader.h b/deps/v8/third_party/zlib/google/zip_reader.h
index d442d42859..9374bebfe3 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.h
+++ b/deps/v8/third_party/zlib/google/zip_reader.h
@@ -240,6 +240,9 @@ class FileWriterDelegate : public WriterDelegate {
// Constructs a FileWriterDelegate that takes ownership of |file|.
explicit FileWriterDelegate(std::unique_ptr<base::File> file);
+ FileWriterDelegate(const FileWriterDelegate&) = delete;
+ FileWriterDelegate& operator=(const FileWriterDelegate&) = delete;
+
// Truncates the file to the number of bytes written.
~FileWriterDelegate() override;
@@ -267,14 +270,16 @@ class FileWriterDelegate : public WriterDelegate {
std::unique_ptr<base::File> owned_file_;
int64_t file_length_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(FileWriterDelegate);
};
// A writer delegate that writes a file at a given path.
class FilePathWriterDelegate : public WriterDelegate {
public:
explicit FilePathWriterDelegate(const base::FilePath& output_file_path);
+
+ FilePathWriterDelegate(const FilePathWriterDelegate&) = delete;
+ FilePathWriterDelegate& operator=(const FilePathWriterDelegate&) = delete;
+
~FilePathWriterDelegate() override;
// WriterDelegate methods:
@@ -292,8 +297,6 @@ class FilePathWriterDelegate : public WriterDelegate {
private:
base::FilePath output_file_path_;
base::File file_;
-
- DISALLOW_COPY_AND_ASSIGN(FilePathWriterDelegate);
};
} // namespace zip
diff --git a/deps/v8/third_party/zlib/google/zip_unittest.cc b/deps/v8/third_party/zlib/google/zip_unittest.cc
index 876f3eb181..daaad21a19 100644
--- a/deps/v8/third_party/zlib/google/zip_unittest.cc
+++ b/deps/v8/third_party/zlib/google/zip_unittest.cc
@@ -88,6 +88,9 @@ class VirtualFileSystem : public zip::FileAccessor {
file_tree_[bar2_txt_path] = {};
}
+ VirtualFileSystem(const VirtualFileSystem&) = delete;
+ VirtualFileSystem& operator=(const VirtualFileSystem&) = delete;
+
~VirtualFileSystem() override = default;
private:
@@ -153,8 +156,6 @@ class VirtualFileSystem : public zip::FileAccessor {
std::map<base::FilePath, DirContents> file_tree_;
std::map<base::FilePath, base::File> files_;
-
- DISALLOW_COPY_AND_ASSIGN(VirtualFileSystem);
};
// static
diff --git a/deps/v8/third_party/zlib/google/zip_writer.h b/deps/v8/third_party/zlib/google/zip_writer.h
index c67903d7d1..c58b1b11d5 100644
--- a/deps/v8/third_party/zlib/google/zip_writer.h
+++ b/deps/v8/third_party/zlib/google/zip_writer.h
@@ -44,6 +44,9 @@ class ZipWriter {
static std::unique_ptr<ZipWriter> Create(const base::FilePath& zip_file,
FileAccessor* file_accessor);
+ ZipWriter(const ZipWriter&) = delete;
+ ZipWriter& operator=(const ZipWriter&) = delete;
+
~ZipWriter();
// Sets the optional progress callback. The callback is called once for each
@@ -135,8 +138,6 @@ class ZipWriter {
// Should recursively add directories?
bool recursive_ = false;
-
- DISALLOW_COPY_AND_ASSIGN(ZipWriter);
};
} // namespace internal
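
Across the zlib/google files, the DISALLOW_COPY_AND_ASSIGN macro is retired in favour of copy operations that are explicitly deleted right after the constructors, matching current Chromium style and dropping the dependency on the macro header. A small self-contained sketch of the idiom:

#include <string>
#include <utility>

class Writer {
 public:
  explicit Writer(std::string path) : path_(std::move(path)) {}

  // Replaces the old DISALLOW_COPY_AND_ASSIGN(Writer); accidental copies are
  // now rejected by the compiler at the call site.
  Writer(const Writer&) = delete;
  Writer& operator=(const Writer&) = delete;

 private:
  std::string path_;
};

// Writer a("out.zip");
// Writer b = a;   // error: use of deleted copy constructor
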
diff --git a/deps/v8/tools/cppgc/gen_cmake.py b/deps/v8/tools/cppgc/gen_cmake.py
index 1063455b7f..b4a805c07c 100755
--- a/deps/v8/tools/cppgc/gen_cmake.py
+++ b/deps/v8/tools/cppgc/gen_cmake.py
@@ -245,7 +245,6 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
option(CPPGC_ENABLE_OBJECT_NAMES "Enable object names in cppgc for debug purposes" OFF)
option(CPPGC_ENABLE_CAGED_HEAP "Enable heap reservation of size 4GB, only possible for 64bit archs" OFF)
option(CPPGC_ENABLE_VERIFY_HEAP "Enables additional heap verification phases and checks" OFF)
-option(CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS " Enable assignment checks for Members/Persistents during prefinalizer invocations" OFF)
option(CPPGC_ENABLE_YOUNG_GENERATION "Enable young generation in cppgc" OFF)
set(CPPGC_TARGET_ARCH "x64" CACHE STRING "Target architecture, possible options: x64, x86, arm, arm64, ppc64, s390x, mipsel, mips64el")
@@ -438,9 +437,6 @@ endif()
if(CPPGC_ENABLE_VERIFY_HEAP)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_ENABLE_VERIFY_HEAP")
endif()
-if(CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS)
- target_compile_definitions({target.name} PRIVATE "-DCPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS")
-endif()
if(CPPGC_ENABLE_YOUNG_GENERATION)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_YOUNG_GENERATION")
endif()"""
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 564c750229..bad1481d1c 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -128,9 +128,9 @@ consts_misc = [
{ 'name': 'prop_kind_mask',
'value': 'PropertyDetails::KindField::kMask' },
{ 'name': 'prop_location_Descriptor',
- 'value': 'kDescriptor' },
+ 'value': 'static_cast<int>(PropertyLocation::kDescriptor)' },
{ 'name': 'prop_location_Field',
- 'value': 'kField' },
+ 'value': 'static_cast<int>(PropertyLocation::kField)' },
{ 'name': 'prop_location_mask',
'value': 'PropertyDetails::LocationField::kMask' },
{ 'name': 'prop_location_shift',
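
The postmortem constants gain the static_cast<int>(...) wrappers because PropertyLocation is now a scoped enum (enum class): its enumerators no longer convert implicitly to int, so the generated C++ has to cast them before embedding the values. A minimal illustration of the difference, using a simplified stand-in for PropertyLocation:

// Unscoped enums inject their enumerators into the surrounding scope and
// convert to int implicitly.
enum OldPropertyLocation { kField, kDescriptor };
int old_value = kDescriptor;  // OK: implicit conversion

// Scoped enums keep their enumerators namespaced and require an explicit
// cast, which is what the generator now emits.
enum class PropertyLocation { kField, kDescriptor };
int new_value = static_cast<int>(PropertyLocation::kDescriptor);
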
diff --git a/deps/v8/tools/release/list_deprecated.py b/deps/v8/tools/release/list_deprecated.py
index bc479e1653..e52d80effd 100755
--- a/deps/v8/tools/release/list_deprecated.py
+++ b/deps/v8/tools/release/list_deprecated.py
@@ -9,6 +9,8 @@ import re
import subprocess
import sys
from pathlib import Path
+import logging
+from multiprocessing import Pool
RE_GITHASH = re.compile(r"^[0-9a-f]{40}")
RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")
@@ -18,132 +20,156 @@ VERSION_CACHE = dict()
RE_VERSION_MAJOR = re.compile(r".*V8_MAJOR_VERSION ([0-9]+)")
RE_VERSION_MINOR = re.compile(r".*V8_MINOR_VERSION ([0-9]+)")
-
-def extract_version(hash):
- if hash in VERSION_CACHE:
- return VERSION_CACHE[hash]
- if hash == '0000000000000000000000000000000000000000':
- return 'HEAD'
- result = subprocess.check_output(
- ['git', 'show', f"{hash}:include/v8-version.h"], encoding='UTF-8')
- major = RE_VERSION_MAJOR.search(result).group(1)
- minor = RE_VERSION_MINOR.search(result).group(1)
- version = f"{major}.{minor}"
- VERSION_CACHE[hash] = version
- return version
-
-
-def get_blame(file_path):
- result = subprocess.check_output(
- ['git', 'blame', '-t', '--line-porcelain', file_path], encoding='UTF-8')
- line_iter = iter(result.splitlines())
- blame_list = list()
- current_blame = None
- while True:
- line = next(line_iter, None)
- if line is None:
- break
- if RE_GITHASH.match(line):
- if current_blame is not None:
- blame_list.append(current_blame)
- hash = line.split(" ")[0]
- current_blame = {
- 'datetime': 0,
- 'filename': None,
- 'content': None,
- 'hash': hash
- }
- continue
- match = RE_AUTHOR_TIME.match(line)
- if match:
- current_blame['datetime'] = datetime.fromtimestamp(int(
- match.groups()[0]))
- continue
- match = RE_FILENAME.match(line)
- if match:
- current_blame['filename'] = match.groups()[0]
- current_blame['content'] = next(line_iter).strip()
- continue
- blame_list.append(current_blame)
- return blame_list
-
-
RE_MACRO_END = re.compile(r"\);")
RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
-def filter_and_print(blame_list, macro, options):
- before = options.before
- index = 0
- re_macro = re.compile(macro)
- deprecated = list()
- while index < len(blame_list):
- blame = blame_list[index]
- commit_datetime = blame['datetime']
- if commit_datetime >= before:
- index += 1
- continue
- line = blame['content']
- commit_hash = blame['hash']
- match = re_macro.search(line)
- if match:
- pos = match.end()
- start = -1
- parens = 0
- while True:
- if pos >= len(line):
- # Extend to next line
- index = index + 1
- blame = blame_list[index]
- line = line + blame['content']
- if line[pos] == '(':
- parens = parens + 1
- elif line[pos] == ')':
- parens = parens - 1
- if parens == 0:
- # Exclude closing ")
- pos = pos - 2
- break
- elif line[pos] == '"' and start == -1:
- start = pos + 1
- pos = pos + 1
- # Extract content and replace double quotes from merged lines
- content = line[start:pos].strip().replace('""', '')
- deprecated.append((index + 1, commit_datetime, commit_hash, content))
- index = index + 1
- print(f"# Marked as {macro}: {len(deprecated)}")
- for linenumber, commit_datetime, commit_hash, content in deprecated:
- commit_date = commit_datetime.date()
- file_position = (
- f"{options.v8_header}:{linenumber}").rjust(len(options.v8_header) + 5)
- print(f" {file_position}\t{commit_date}\t{commit_hash[:8]}"
- f"\t{extract_version(commit_hash)}\t{content}")
- return len(deprecated)
+class HeaderFile(object):
+ def __init__(self, path):
+ self.path = path
+ self.blame_list = self.get_blame_list()
+
+ @classmethod
+ def get_api_header_files(clazz, options):
+ files = subprocess.check_output(
+ ['git', 'ls-tree', '--name-only', '-r', 'HEAD', options.include_dir],
+ encoding='UTF-8')
+ files = filter(lambda l: l.endswith('.h'), files.splitlines())
+ with Pool(processes=24) as pool:
+ return pool.map(HeaderFile, files)
+
+ def extract_version(self, hash):
+ if hash in VERSION_CACHE:
+ return VERSION_CACHE[hash]
+ if hash == '0000000000000000000000000000000000000000':
+ return 'HEAD'
+ result = subprocess.check_output(
+ ['git', 'show', f"{hash}:include/v8-version.h"], encoding='UTF-8')
+ major = RE_VERSION_MAJOR.search(result).group(1)
+ minor = RE_VERSION_MINOR.search(result).group(1)
+ version = f"{major}.{minor}"
+ VERSION_CACHE[hash] = version
+ return version
+
+ def get_blame_list(self):
+ logging.info(f"blame list for {self.path}")
+ result = subprocess.check_output(
+ ['git', 'blame', '-t', '--line-porcelain', self.path],
+ encoding='UTF-8')
+ line_iter = iter(result.splitlines())
+ blame_list = list()
+ current_blame = None
+ while True:
+ line = next(line_iter, None)
+ if line is None:
+ break
+ if RE_GITHASH.match(line):
+ if current_blame is not None:
+ blame_list.append(current_blame)
+ hash = line.split(" ")[0]
+ current_blame = {
+ 'datetime': 0,
+ 'filename': None,
+ 'content': None,
+ 'hash': hash
+ }
+ continue
+ match = RE_AUTHOR_TIME.match(line)
+ if match:
+ current_blame['datetime'] = datetime.fromtimestamp(
+ int(match.groups()[0]))
+ continue
+ match = RE_FILENAME.match(line)
+ if match:
+ current_blame['filename'] = match.groups()[0]
+ current_blame['content'] = next(line_iter).strip()
+ continue
+ blame_list.append(current_blame)
+ return blame_list
+
+ def filter_and_print(self, macro, options):
+ before = options.before
+ index = 0
+ re_macro = re.compile(macro)
+ deprecated = list()
+ while index < len(self.blame_list):
+ blame = self.blame_list[index]
+ line = blame['content']
+ if line.startswith("#") or line.startswith("//"):
+ index += 1
+ continue
+ commit_datetime = blame['datetime']
+ if commit_datetime >= before:
+ index += 1
+ continue
+ commit_hash = blame['hash']
+ match = re_macro.search(line)
+ if match:
+ pos = match.end()
+ start = -1
+ parens = 0
+ while True:
+ if pos >= len(line):
+ # Extend to next line
+ index = index + 1
+ blame = self.blame_list[index]
+ line = line + blame['content']
+ if line[pos] == '(':
+ parens = parens + 1
+ elif line[pos] == ')':
+ parens = parens - 1
+ if parens == 0:
+ # Exclude closing ")
+ pos = pos - 2
+ break
+ elif line[pos] == '"' and start == -1:
+ start = pos + 1
+ pos = pos + 1
+ # Extract content and replace double quotes from merged lines
+ content = line[start:pos].strip().replace('""', '')
+ deprecated.append((index + 1, commit_datetime, commit_hash, content))
+ index = index + 1
+ if len(deprecated) == 0: return
+ for linenumber, commit_datetime, commit_hash, content in deprecated:
+ commit_date = commit_datetime.date()
+ file_position = (f"{self.path}:{linenumber}").rjust(40)
+ print(f" {file_position}\t{commit_date}\t{commit_hash[:8]}"
+ f"\t{self.extract_version(commit_hash)}\t{content}")
+ return len(deprecated)
def parse_options(args):
parser = argparse.ArgumentParser(
description="Collect deprecation statistics")
- parser.add_argument("v8_header", nargs='?', help="Path to v8.h")
+ parser.add_argument("include_dir", nargs='?', help="Path to includes dir")
parser.add_argument("--before", help="Filter by date")
+ parser.add_argument("--verbose",
+ "-v",
+ help="Verbose logging",
+ action="store_true")
options = parser.parse_args(args)
+ if options.verbose:
+ logging.basicConfig(level=logging.DEBUG)
if options.before:
options.before = datetime.strptime(options.before, '%Y-%m-%d')
else:
options.before = datetime.now()
- if options.v8_header is None:
+ if options.include_dir is None:
base_path = Path(__file__).parent.parent
- options.v8_header = str(
- (base_path / 'include' / 'v8.h').relative_to(base_path))
+ options.include_dir = str((base_path / 'include').relative_to(base_path))
return options
def main(args):
options = parse_options(args)
- blame_list = get_blame(options.v8_header)
- filter_and_print(blame_list, "V8_DEPRECATE_SOON", options)
+ header_files = HeaderFile.get_api_header_files(options)
+ print("V8_DEPRECATE_SOON:")
+ for header in header_files:
+ header.filter_and_print("V8_DEPRECATE_SOON", options)
print("\n")
- filter_and_print(blame_list, "V8_DEPRECATED", options)
+ print("V8_DEPRECATED:")
+ for header in header_files:
+ header.filter_and_print("V8_DEPRECATED", options)
if __name__ == "__main__":
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index f2e72261f0..1e22b298a8 100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -459,7 +459,9 @@ class RunnableConfig(GraphConfig):
"""
suite_dir = os.path.abspath(os.path.dirname(suite_path))
bench_dir = os.path.normpath(os.path.join(*self.path))
- os.chdir(os.path.join(suite_dir, bench_dir))
+ cwd = os.path.join(suite_dir, bench_dir)
+ logging.debug('Changing CWD to: %s' % cwd)
+ os.chdir(cwd)
def GetCommandFlags(self, extra_flags=None):
suffix = ['--'] + self.test_flags if self.test_flags else []
@@ -702,6 +704,7 @@ class DesktopPlatform(Platform):
def _Run(self, runnable, count, secondary=False):
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
+ logging.debug('Running command: %s' % cmd)
output = cmd.execute()
if output.IsSuccess() and '--prof' in self.extra_flags:
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
index 4faa8fc3aa..1ef8347088 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-base.mjs
@@ -170,6 +170,9 @@ export class TimelineTrackBase extends V8CustomElement {
}
_updateDimensions() {
+ // No data in this timeline, no need to resize
+ if (!this._timeline) return;
+
const centerOffset = this._timelineBoundingClientRect.width / 2;
const time =
this.relativePositionToTime(this._timelineScrollLeft + centerOffset);
diff --git a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
index 24b389b959..5d60a5fa09 100644
--- a/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
+++ b/deps/v8/tools/system-analyzer/view/timeline/timeline-track-stacked-base.mjs
@@ -24,7 +24,7 @@ export class TimelineTrackStackedBase extends TimelineTrackBase {
set data(timeline) {
super.data = timeline;
this._contentWidth = 0;
- this._prepareDrawableItems();
+ if (timeline.values.length > 0) this._prepareDrawableItems();
}
_handleDoubleClick(event) {
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 009b7b23f8..d86dea1cb4 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -376,16 +376,13 @@ class BaseTestRunner(object):
# Progress
parser.add_option("-p", "--progress",
- choices=list(PROGRESS_INDICATORS), default="mono",
+ choices=list(PROGRESS_INDICATORS.keys()), default="mono",
help="The style of progress indicator (verbose, dots, "
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -533,7 +530,7 @@ class BaseTestRunner(object):
options.j = multiprocessing.cpu_count()
options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+ options.extra_flags = sum(list(map(shlex.split, options.extra_flags)), [])
def _process_options(self, options):
pass
@@ -620,7 +617,7 @@ class BaseTestRunner(object):
def expand_test_group(name):
return TEST_MAP.get(name, [name])
- return reduce(list.__add__, map(expand_test_group, args), [])
+ return reduce(list.__add__, list(map(expand_test_group, args)), [])
def _args_to_suite_names(self, args, test_root):
# Use default tests if no test configuration was provided at the cmd line.
@@ -783,7 +780,7 @@ class BaseTestRunner(object):
raise NotImplementedError()
def _prepare_procs(self, procs):
- procs = filter(None, procs)
+ procs = list([_f for _f in procs if _f])
for i in range(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
@@ -833,9 +830,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index de903752bb..174bd27a5f 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -32,8 +32,8 @@ from __future__ import absolute_import
import os
import re
-from .variants import ALL_VARIANTS
-from .utils import Freeze
+from testrunner.local.variants import ALL_VARIANTS
+from testrunner.local.utils import Freeze
# Possible outcomes
FAIL = "FAIL"
@@ -131,8 +131,8 @@ class StatusFile(object):
for variant in variants:
for rule, value in (
- list(self._rules.get(variant, {}).iteritems()) +
- list(self._prefix_rules.get(variant, {}).iteritems())):
+ list(self._rules.get(variant, {}).items()) +
+ list(self._prefix_rules.get(variant, {}).items())):
if (rule, variant) not in used_rules:
if variant == '':
variant_desc = 'variant independent'
@@ -161,7 +161,7 @@ def _EvalExpression(exp, variables):
try:
return eval(exp, variables)
except NameError as e:
- identifier = re.match("name '(.*)' is not defined", e.message).group(1)
+ identifier = re.match("name '(.*)' is not defined", str(e)).group(1)
assert identifier == "variant", "Unknown identifier: %s" % identifier
return VARIANT_EXPRESSION
@@ -283,7 +283,7 @@ def ReadStatusFile(content, variables):
def _ReadSection(section, variables, rules, prefix_rules):
assert type(section) == dict
- for rule, outcome_list in section.items():
+ for rule, outcome_list in list(section.items()):
assert type(rule) == str
if rule[-1] == '*':
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index a72ef4be61..864d7346fc 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -223,7 +223,7 @@ class TestGenerator(object):
return self
def __next__(self):
- return next(self)
+ return self.next()
def next(self):
return next(self._iterator)
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index 05d1ef7d5e..896f073166 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -212,7 +212,7 @@ class FrozenDict(dict):
def Freeze(obj):
if isinstance(obj, dict):
- return FrozenDict((k, Freeze(v)) for k, v in obj.iteritems())
+ return FrozenDict((k, Freeze(v)) for k, v in list(obj.items()))
elif isinstance(obj, set):
return frozenset(obj)
elif isinstance(obj, list):
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
index ebf01078fb..ffd2407d92 100755
--- a/deps/v8/tools/testrunner/num_fuzzer.py
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -124,10 +124,11 @@ class NumFuzzer(base_runner.BaseTestRunner):
def _runner_flags(self):
"""Extra default flags specific to the test runner implementation."""
- flags = ['--no-abort-on-contradictory-flags', '--testing-d8-test-runner']
- if self.infra_staging:
- flags.append('--no-fail')
- return flags
+ return [
+ '--no-abort-on-contradictory-flags',
+ '--testing-d8-test-runner',
+ '--no-fail'
+ ]
def _get_statusfile_variables(self, options):
variables = (
@@ -163,7 +164,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
# TODO(majeski): Improve sharding when combiner is present. Maybe select
# different random seeds for shards instead of splitting tests.
self._create_shard_proc(options),
- ExpectationProc(self.infra_staging),
+ ExpectationProc(),
combiner,
self._create_fuzzer(fuzzer_rng, options),
sigproc,
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index e044c20805..19fbdd6c11 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -30,14 +30,14 @@ import os
import re
import shlex
-from ..outproc import base as outproc
-from ..local import command
-from ..local import statusfile
-from ..local import utils
-from ..local.variants import ALL_VARIANT_FLAGS
-from ..local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
-from ..local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
-from ..local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG
+from testrunner.outproc import base as outproc
+from testrunner.local import command
+from testrunner.local import statusfile
+from testrunner.local import utils
+from testrunner.local.variants import ALL_VARIANT_FLAGS
+from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
+from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
+from testrunner.local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
@@ -75,6 +75,13 @@ except NameError:
def cmp(x, y): # Python 3
return (x > y) - (x < y)
+def read_file_utf8(file):
+ try: # Python 3
+ with open(file, encoding='utf-8') as f:
+ return f.read()
+ except TypeError: # Python 2
+ with open(file) as f:
+ return f.read()
class TestCase(object):
def __init__(self, suite, path, name, test_config):
@@ -130,8 +137,8 @@ class TestCase(object):
return not is_flag(outcome)
outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
- self._statusfile_outcomes = filter(not_flag, outcomes)
- self._statusfile_flags = filter(is_flag, outcomes)
+ self._statusfile_outcomes = list(filter(not_flag, outcomes))
+ self._statusfile_flags = list(filter(is_flag, outcomes))
self._expected_outcomes = (
self._parse_status_file_outcomes(self._statusfile_outcomes))
@@ -407,8 +414,7 @@ class TestCase(object):
return self._get_source_path() is not None
def get_source(self):
- with open(self._get_source_path()) as f:
- return f.read()
+ return read_file_utf8(self._get_source_path())
def _get_source_path(self):
return None
@@ -454,8 +460,7 @@ class D8TestCase(TestCase):
"""Returns for a given file a list of absolute paths of files needed by the
given file.
"""
- with open(file) as f:
- source = f.read()
+ source = read_file_utf8(file)
result = []
def add_path(path):
result.append(os.path.abspath(path.replace('/', os.path.sep)))
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 50482da70e..08f17e7721 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -16,7 +16,7 @@ import sys
import tempfile
# Adds testrunner to the path, hence it has to be imported at the beginning.
-from . import base_runner
+import testrunner.base_runner as base_runner
from testrunner.local import utils
from testrunner.local.variants import ALL_VARIANTS
diff --git a/deps/v8/tools/testrunner/testproc/expectation.py b/deps/v8/tools/testrunner/testproc/expectation.py
index df7a2c2b1a..3abe40e169 100644
--- a/deps/v8/tools/testrunner/testproc/expectation.py
+++ b/deps/v8/tools/testrunner/testproc/expectation.py
@@ -9,15 +9,13 @@ from testrunner.outproc import base as outproc
class ExpectationProc(base.TestProcProducer):
"""Test processor passing tests and results through and forgiving timeouts."""
- def __init__(self, infra_staging):
+ def __init__(self):
super(ExpectationProc, self).__init__('no-timeout')
- self.infra_staging = infra_staging
def _next_test(self, test):
subtest = self._create_subtest(test, 'no_timeout')
subtest.allow_timeouts()
- if self.infra_staging:
- subtest.allow_pass()
+ subtest.allow_pass()
return self._send_test(subtest)
def _result_for(self, test, subtest, result):
diff --git a/deps/v8/tools/testrunner/testproc/filter.py b/deps/v8/tools/testrunner/testproc/filter.py
index 20af0f8407..728af483cb 100644
--- a/deps/v8/tools/testrunner/testproc/filter.py
+++ b/deps/v8/tools/testrunner/testproc/filter.py
@@ -70,7 +70,7 @@ class NameFilterProc(base.TestProcFilter):
else:
self._exact_matches[suitename][path] = True
- for s, globs in self._globs.iteritems():
+ for s, globs in list(self._globs.items()):
if not globs or '*' in globs:
self._globs[s] = ['*']
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index ec97ab226f..c102cddec1 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -15,7 +15,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test, is_flaky=False):
@@ -362,45 +361,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 5693bf147b..406d1860f8 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -173,64 +173,65 @@ INSTANCE_TYPES = {
2072: "JS_ARRAY_CONSTRUCTOR_TYPE",
2073: "JS_PROMISE_CONSTRUCTOR_TYPE",
2074: "JS_REG_EXP_CONSTRUCTOR_TYPE",
- 2075: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
- 2076: "JS_ITERATOR_PROTOTYPE_TYPE",
- 2077: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
- 2078: "JS_OBJECT_PROTOTYPE_TYPE",
- 2079: "JS_PROMISE_PROTOTYPE_TYPE",
- 2080: "JS_REG_EXP_PROTOTYPE_TYPE",
- 2081: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
- 2082: "JS_SET_PROTOTYPE_TYPE",
- 2083: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
- 2084: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
- 2085: "JS_MAP_KEY_ITERATOR_TYPE",
- 2086: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
- 2087: "JS_MAP_VALUE_ITERATOR_TYPE",
- 2088: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
- 2089: "JS_SET_VALUE_ITERATOR_TYPE",
- 2090: "JS_GENERATOR_OBJECT_TYPE",
- 2091: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
- 2092: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
- 2093: "JS_DATA_VIEW_TYPE",
- 2094: "JS_TYPED_ARRAY_TYPE",
- 2095: "JS_MAP_TYPE",
- 2096: "JS_SET_TYPE",
- 2097: "JS_WEAK_MAP_TYPE",
- 2098: "JS_WEAK_SET_TYPE",
- 2099: "JS_ARGUMENTS_OBJECT_TYPE",
- 2100: "JS_ARRAY_TYPE",
- 2101: "JS_ARRAY_BUFFER_TYPE",
- 2102: "JS_ARRAY_ITERATOR_TYPE",
- 2103: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 2104: "JS_COLLATOR_TYPE",
- 2105: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 2106: "JS_DATE_TYPE",
- 2107: "JS_DATE_TIME_FORMAT_TYPE",
- 2108: "JS_DISPLAY_NAMES_TYPE",
- 2109: "JS_ERROR_TYPE",
- 2110: "JS_FINALIZATION_REGISTRY_TYPE",
- 2111: "JS_LIST_FORMAT_TYPE",
- 2112: "JS_LOCALE_TYPE",
- 2113: "JS_MESSAGE_OBJECT_TYPE",
- 2114: "JS_NUMBER_FORMAT_TYPE",
- 2115: "JS_PLURAL_RULES_TYPE",
- 2116: "JS_PROMISE_TYPE",
- 2117: "JS_REG_EXP_TYPE",
- 2118: "JS_REG_EXP_STRING_ITERATOR_TYPE",
- 2119: "JS_RELATIVE_TIME_FORMAT_TYPE",
- 2120: "JS_SEGMENT_ITERATOR_TYPE",
- 2121: "JS_SEGMENTER_TYPE",
- 2122: "JS_SEGMENTS_TYPE",
- 2123: "JS_STRING_ITERATOR_TYPE",
- 2124: "JS_V8_BREAK_ITERATOR_TYPE",
- 2125: "JS_WEAK_REF_TYPE",
- 2126: "WASM_GLOBAL_OBJECT_TYPE",
- 2127: "WASM_INSTANCE_OBJECT_TYPE",
- 2128: "WASM_MEMORY_OBJECT_TYPE",
- 2129: "WASM_MODULE_OBJECT_TYPE",
- 2130: "WASM_TABLE_OBJECT_TYPE",
- 2131: "WASM_TAG_OBJECT_TYPE",
- 2132: "WASM_VALUE_OBJECT_TYPE",
+ 2075: "JS_CLASS_CONSTRUCTOR_TYPE",
+ 2076: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
+ 2077: "JS_ITERATOR_PROTOTYPE_TYPE",
+ 2078: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
+ 2079: "JS_OBJECT_PROTOTYPE_TYPE",
+ 2080: "JS_PROMISE_PROTOTYPE_TYPE",
+ 2081: "JS_REG_EXP_PROTOTYPE_TYPE",
+ 2082: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
+ 2083: "JS_SET_PROTOTYPE_TYPE",
+ 2084: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
+ 2085: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
+ 2086: "JS_MAP_KEY_ITERATOR_TYPE",
+ 2087: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+ 2088: "JS_MAP_VALUE_ITERATOR_TYPE",
+ 2089: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+ 2090: "JS_SET_VALUE_ITERATOR_TYPE",
+ 2091: "JS_GENERATOR_OBJECT_TYPE",
+ 2092: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+ 2093: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 2094: "JS_DATA_VIEW_TYPE",
+ 2095: "JS_TYPED_ARRAY_TYPE",
+ 2096: "JS_MAP_TYPE",
+ 2097: "JS_SET_TYPE",
+ 2098: "JS_WEAK_MAP_TYPE",
+ 2099: "JS_WEAK_SET_TYPE",
+ 2100: "JS_ARGUMENTS_OBJECT_TYPE",
+ 2101: "JS_ARRAY_TYPE",
+ 2102: "JS_ARRAY_BUFFER_TYPE",
+ 2103: "JS_ARRAY_ITERATOR_TYPE",
+ 2104: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 2105: "JS_COLLATOR_TYPE",
+ 2106: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 2107: "JS_DATE_TYPE",
+ 2108: "JS_DATE_TIME_FORMAT_TYPE",
+ 2109: "JS_DISPLAY_NAMES_TYPE",
+ 2110: "JS_ERROR_TYPE",
+ 2111: "JS_FINALIZATION_REGISTRY_TYPE",
+ 2112: "JS_LIST_FORMAT_TYPE",
+ 2113: "JS_LOCALE_TYPE",
+ 2114: "JS_MESSAGE_OBJECT_TYPE",
+ 2115: "JS_NUMBER_FORMAT_TYPE",
+ 2116: "JS_PLURAL_RULES_TYPE",
+ 2117: "JS_PROMISE_TYPE",
+ 2118: "JS_REG_EXP_TYPE",
+ 2119: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+ 2120: "JS_RELATIVE_TIME_FORMAT_TYPE",
+ 2121: "JS_SEGMENT_ITERATOR_TYPE",
+ 2122: "JS_SEGMENTER_TYPE",
+ 2123: "JS_SEGMENTS_TYPE",
+ 2124: "JS_STRING_ITERATOR_TYPE",
+ 2125: "JS_V8_BREAK_ITERATOR_TYPE",
+ 2126: "JS_WEAK_REF_TYPE",
+ 2127: "WASM_GLOBAL_OBJECT_TYPE",
+ 2128: "WASM_INSTANCE_OBJECT_TYPE",
+ 2129: "WASM_MEMORY_OBJECT_TYPE",
+ 2130: "WASM_MODULE_OBJECT_TYPE",
+ 2131: "WASM_TABLE_OBJECT_TYPE",
+ 2132: "WASM_TAG_OBJECT_TYPE",
+ 2133: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
@@ -389,7 +390,7 @@ KNOWN_MAPS = {
("read_only_space", 0x05f81): (82, "StoreHandler2Map"),
("read_only_space", 0x05fa9): (82, "StoreHandler3Map"),
("map_space", 0x02119): (1057, "ExternalMap"),
- ("map_space", 0x02141): (2113, "JSMessageObjectMap"),
+ ("map_space", 0x02141): (2114, "JSMessageObjectMap"),
}
# List of known V8 objects.
@@ -474,27 +475,27 @@ KNOWN_OBJECTS = {
("old_space", 0x029b5): "StringSplitCache",
("old_space", 0x02dbd): "RegExpMultipleCache",
("old_space", 0x031c5): "BuiltinsConstantsTable",
- ("old_space", 0x035e5): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x03609): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x0362d): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x03651): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x03675): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x03699): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x036bd): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x036e1): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x03705): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x03729): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x0374d): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x03771): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x03795): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x037b9): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x037dd): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x03801): "PromiseCatchFinallySharedFun",
- ("old_space", 0x03825): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x03849): "PromiseThenFinallySharedFun",
- ("old_space", 0x0386d): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x03891): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x038b5): "ProxyRevokeSharedFun",
+ ("old_space", 0x035ed): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x03611): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x03635): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x03659): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x0367d): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x036a1): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x036c5): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x036e9): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x0370d): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x03731): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x03755): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x03779): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x0379d): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x037c1): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x037e5): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x03809): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x0382d): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x03851): "PromiseThenFinallySharedFun",
+ ("old_space", 0x03875): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x03899): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x038bd): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index df5348eb78..1ab8853a1a 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -44,6 +44,7 @@ log_and_run mkdir ${TMP_DIR}
log_and_run rm -rf ${JS_API_TEST_DIR}/tests
log_and_run mkdir ${JS_API_TEST_DIR}/tests
+log_and_run mkdir ${JS_API_TEST_DIR}/tests/wpt
log_and_run mkdir ${JS_API_TEST_DIR}/tests/proposals
###############################################################################
@@ -68,6 +69,25 @@ log_and_run cp ${TMP_DIR}/*.js ${SPEC_TEST_DIR}/tests/
log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
###############################################################################
+# Generate the wpt tests.
+###############################################################################
+
+echo Process wpt
+log_and_run cd ${TMP_DIR}
+log_and_run git clone https://github.com/web-platform-tests/wpt
+log_and_run cp -r wpt/wasm/jsapi/* ${JS_API_TEST_DIR}/tests/wpt
+
+log_and_run cd ${JS_API_TEST_DIR}/tests
+for spec_test_name in $(find ./ -name '*.any.js' -not -wholename '*/wpt/*'); do
+ wpt_test_name="wpt/${spec_test_name}"
+ if [ -f "$wpt_test_name" ] && cmp -s $spec_test_name $wpt_test_name ; then
+ log_and_run rm $wpt_test_name
+ elif [ -f "$wpt_test_name" ]; then
+ echo "keep" $wpt_test_name
+ fi
+done
+
+###############################################################################
# Generate the proposal tests.
###############################################################################
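The new wpt step clones web-platform-tests, copies wasm/jsapi into tests/wpt, and then drops any wpt copy that is byte-identical to the spec's own .any.js test, so each test runs only once while divergent wpt variants are kept. The same de-duplication, sketched in Python under assumed relative paths (illustration only, not part of the script):

import filecmp
import os

tests_dir = 'tests'  # assumed layout: spec tests in tests/, wpt copies in tests/wpt/
for root, _, files in os.walk(tests_dir):
    if (os.path.sep + 'wpt') in root:
        continue  # only walk the spec-side copies
    for name in files:
        if not name.endswith('.any.js'):
            continue
        spec_test = os.path.join(root, name)
        wpt_test = os.path.join(tests_dir, 'wpt', os.path.relpath(spec_test, tests_dir))
        if os.path.isfile(wpt_test) and filecmp.cmp(spec_test, wpt_test, shallow=False):
            os.remove(wpt_test)        # identical: keep only the spec version
        elif os.path.isfile(wpt_test):
            print('keep', wpt_test)    # differs from the spec copy: keep it
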
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index f890e67970..0e5d08c80c 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -6,7 +6,7 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly.......
+The autoroller bought a round of Himbeerbrause. Suddenly........
The bartender starts to shake the bottles...........................
I can't add trailing whitespaces, so I'm adding this line............
I'm starting to think that just adding trailing whitespaces might not be bad.
@@ -15,4 +15,4 @@ Because whitespaces are not that funny......
Today's answer to life the universe and everything is 12950!
Today's answer to life the universe and everything is 6728!
Today's answer to life the universe and everything is 6728!!
-.
+..